// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
22 #include "ice_eswitch.h"
23 #include "ice_tc_lib.h"
24 #include "ice_vsi_vlan_ops.h"
26 #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
27 static const char ice_driver_string[] = DRV_SUMMARY;
28 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
30 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
31 #define ICE_DDP_PKG_PATH "intel/ice/ddp/"
32 #define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg"
34 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
35 MODULE_DESCRIPTION(DRV_SUMMARY);
36 MODULE_LICENSE("GPL v2");
37 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
39 static int debug = -1;
40 module_param(debug, int, 0644);
41 #ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

static DEFINE_IDA(ice_aux_ida);
DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);
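
/* Usage sketch (illustrative, not part of the original listing):
 * ice_xdp_locking_key is a static branch, so hot paths can test it at
 * near-zero cost. It is typically enabled when Tx queues must be shared
 * between CPUs and tested in the XDP transmit path, roughly:
 *
 *	static_branch_inc(&ice_xdp_locking_key);	// enable sharing
 *	...
 *	if (static_branch_unlikely(&ice_xdp_locking_key))
 *		spin_lock(&xdp_ring->tx_lock);
 *	...
 *	static_branch_dec(&ice_xdp_locking_key);	// disable again
 */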

/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */
struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
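
/* Worked example (illustrative, not in the original source): with
 * ring->count == 512, head == 500 and tail == 10 the ring has wrapped,
 * so the pending count is tail + count - head = 10 + 512 - 500 = 22.
 */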

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			ring_stats->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * torn down and a dev_err message will be printed.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}
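
/* Context sketch (illustrative; the callback lives elsewhere in this file
 * and is not part of this excerpt): the *_FLTR_CHANGED bits tested above are
 * set from the netdev's .ndo_set_rx_mode callback, conceptually:
 *
 *	static void ice_set_rx_mode(struct net_device *netdev)
 *	{
 *		struct ice_netdev_priv *np = netdev_priv(netdev);
 *		struct ice_vsi *vsi = np->vsi;
 *
 *		if (!vsi)
 *			return;
 *
 *		set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
 *		set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
 *		set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
 *
 *		ice_service_task_schedule(vsi->back);
 *	}
 */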

/**
 * ice_set_promisc - Enable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	} else {
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						  promisc_m, 0);
	}
	if (status && status != -EEXIST)
		return status;

	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return 0;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
							 promisc_m);
	} else {
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						    promisc_m, 0);
	}

	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return status;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If the filter was added successfully or already exists, do not
	 * treat it as an error; continue processing the rest of the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}
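
/* Note (illustrative): __dev_uc_sync()/__dev_mc_sync() walk the netdev's
 * unicast/multicast address lists under addr_list_lock and call the sync
 * callback for addresses added since the last sync and the unsync callback
 * for addresses since removed:
 *
 *	netif_addr_lock_bh(netdev);
 *	__dev_uc_sync(netdev, driver_sync_cb, driver_unsync_cb);
 *	netif_addr_unlock_bh(netdev);
 *
 * which is exactly the pattern ice_vsi_sync_fltr() uses above; here the
 * callbacks only queue addresses on temporary lists that are then pushed to
 * hardware in one batch.
 */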

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_clear_sw_switch_recipes - clear switch recipes
 * @pf: board private structure
 *
 * Mark switch recipes as not created in sw structures. There are cases where
 * rules (especially advanced rules) need to be restored, either re-read from
 * hardware or added again, for example after a reset. The 'recp_created' flag
 * prevents this and needs to be cleared up front.
 */
static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
{
	struct ice_sw_recipe *recp;
	u8 i;

	recp = pf->hw.switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		recp[i].recp_created = false;
}

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_qs_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on safe side, reset orig_rss_size so that normal flow
	 * of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset type, do not support channel rebuild
			 * hence reset needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->rss_size = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set,
	 * prepare for the pending reset if not already prepared (for PF
	 * software-initiated global resets the software should already be
	 * prepared, as indicated by ICE_PREPARED_FOR_RESET; for global resets
	 * initiated by firmware or software on other PFs, that bit is not set
	 * so prepare for the reset now), poll for reset done, rebuild and
	 * return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel(R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *an;
	const char *fec;
	const char *fc;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error here should only be logged with a debug message, and
 * the driver should continue on with rebuild/re-enable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf = tlv->tlvinfo;
	buf[0] = 0x08;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}

/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}

/**
 * ice_check_module_power - check module power level
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other bookkeeping needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

enum ice_aq_task_state {
	ICE_AQ_TASK_WAITING = 0,
	ICE_AQ_TASK_COMPLETE,
	ICE_AQ_TASK_CANCELED,
};

struct ice_aq_task {
	struct hlist_node entry;

	u16 opcode;
	struct ice_rq_event_info *event;
	enum ice_aq_task_state state;
};

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @opcode: the opcode to wait for
 * @timeout: how long to wait, in jiffies
 * @event: storage for the event info
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * To obtain only the descriptor contents, pass an event without an allocated
 * msg_buf. If the complete data buffer is desired, allocate the
 * event->msg_buf with enough space ahead of time.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
			  struct ice_rq_event_info *event)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_aq_task *task;
	unsigned long start;
	long ret;
	int err;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->event = event;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);

	start = jiffies;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
					       timeout);
	switch (task->state) {
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);
	kfree(task);

	return err;
}
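
/* Usage sketch (illustrative, hypothetical caller; the buffer size constant
 * is an assumption): wait up to three seconds for a firmware response to a
 * given opcode, capturing the full response buffer:
 *
 *	struct ice_rq_event_info event = { };
 *	int err;
 *
 *	event.buf_len = ICE_AQ_MAX_BUF_LEN;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	if (!event.msg_buf)
 *		return -ENOMEM;
 *
 *	err = ice_aq_wait_for_event(pf, opcode, 3 * HZ, &event);
 *	if (!err) {
 *		// inspect event.desc and event.msg_buf
 *	}
 *	kfree(event.msg_buf);
 */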

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state || task->opcode != opcode)
			continue;

		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
		task->event->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task->event->msg_buf &&
		    task->event->buf_len > event->buf_len) {
			memcpy(task->event->msg_buf, event->msg_buf,
			       event->buf_len);
			task->event->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			if (!ice_is_malicious_vf(pf, &event, i, pending))
				ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}
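
/* Worked example (illustrative): if cq->rq.next_to_clean is 3 while the
 * masked head register reads 5, firmware has written descriptors 3 and 4
 * that the driver has not yet processed, so the function returns true.
 */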

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* Nothing to do here if sideband queue is not supported */
	if (!ice_is_sbq_supported(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume works (e.g WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}
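
/* Setup sketch (illustrative; the probe-time code is outside this excerpt):
 * a periodic timer like this is armed once with the timer_setup()/mod_timer()
 * pattern, roughly:
 *
 *	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
 *	pf->serv_tmr_period = HZ;
 *	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
 *
 * after which ice_service_timer() re-arms itself on every expiry as shown
 * above.
 */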

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}
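
	/* Note (illustrative): the (reg & MASK) >> SHIFT extraction used
	 * above is the open-coded form of FIELD_GET() from
	 * <linux/bitfield.h>, e.g.:
	 *
	 *	u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
	 *
	 * assuming the _M define is a contiguous mask; later versions of
	 * this driver migrated to that helper.
	 */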

	/* check to see if this PF caused an MDD event */
	reg = rd32(hw, PF_MDET_TX_PQM);
	if (reg & PF_MDET_TX_PQM_VALID_M) {
		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_TX_TCLAN);
	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_RX);
	if (reg & PF_MDET_RX_VALID_M) {
		wr32(hw, PF_MDET_RX, 0xFFFF);
		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
	}

	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
			vf->mdd_rx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_rx_err(pf))
				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
					 vf->vf_id);

			/* Since the queue is disabled on VF Rx MDD events, the
			 * PF can be configured to reset the VF through ethtool
			 * private flag mdd-auto-reset-vf.
			 */
			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
				/* VF MDD event counters will be cleared by
				 * reset, so print the event prior to reset.
				 */
				ice_print_vf_rx_mdd_event(vf);
				ice_reset_vf(vf, ICE_VF_RESET_LOCK);
			}
		}
	}
	mutex_unlock(&pf->vfs.table_lock);

	ice_print_vfs_mdd_events(pf);
}
1831 ice_print_vfs_mdd_events(pf);
1835 * ice_force_phys_link_state - Force the physical link state
1836 * @vsi: VSI to force the physical link state to up/down
1837 * @link_up: true/false indicates to set the physical link to up/down
1839 * Force the physical link state by getting the current PHY capabilities from
1840 * hardware and setting the PHY config based on the determined capabilities. If
1841 * link changes a link event will be triggered because both the Enable Automatic
1842 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1844 * Returns 0 on success, negative on failure
1846 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1848 struct ice_aqc_get_phy_caps_data *pcaps;
1849 struct ice_aqc_set_phy_cfg_data *cfg;
1850 struct ice_port_info *pi;
1854 if (!vsi || !vsi->port_info || !vsi->back)
1856 if (vsi->type != ICE_VSI_PF)
1859 dev = ice_pf_to_dev(vsi->back);
1861 pi = vsi->port_info;
1863 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1867 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1870 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1871 vsi->vsi_num, retcode);
1876 /* No change in link */
1877 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1878 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1881 /* Use the current user PHY configuration. The current user PHY
1882 * configuration is initialized during probe from PHY capabilities
1883 * software mode, and updated on set PHY configuration.
1885 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1891 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1893 cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1895 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1897 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1899 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1900 vsi->vsi_num, retcode);

/**
 * ice_init_nvm_phy_type - Initialize the NVM PHY type
 * @pi: port info structure
 *
 * Initialize nvm_phy_type_[low|high] for link lenient mode support
 */
static int ice_init_nvm_phy_type(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_pf *pf = pi->hw->back;
	int err;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
				  pcaps, NULL);

	if (err) {
		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
		goto out;
	}

	pf->nvm_phy_type_hi = pcaps->phy_type_high;
	pf->nvm_phy_type_lo = pcaps->phy_type_low;

out:
	kfree(pcaps);
	return err;
}

/**
 * ice_init_link_dflt_override - Initialize link default override
 * @pi: port info structure
 *
 * Initialize link default override and PHY total port shutdown during probe
 */
static void ice_init_link_dflt_override(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv *ldo;
	struct ice_pf *pf = pi->hw->back;

	ldo = &pf->link_dflt_override;
	if (ice_get_link_default_override(ldo, pi))
		return;

	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
		return;

	/* Enable Total Port Shutdown (override/replace link-down-on-close
	 * ethtool private flag) for ports with Port Disable bit set.
	 */
	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
}

/**
 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
 * @pi: port info structure
 *
 * If default override is enabled, initialize the user PHY cfg speed and FEC
 * settings using the default override mask from the NVM.
 *
 * The PHY should only be configured with the default override settings the
 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
 * is used to indicate that the user PHY cfg default override is initialized
 * and the PHY has not been configured with the default override settings. The
 * state is set here, and cleared in ice_configure_phy the first time the PHY
 * is configured.
 *
 * This function should be called only if the FW doesn't support default
 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
 */
static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv *ldo;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = pi->hw->back;

	ldo = &pf->link_dflt_override;

	/* If link default override is enabled, use to mask NVM PHY capabilities
	 * for speed and FEC default configuration.
	 */
	cfg = &phy->curr_user_phy_cfg;

	if (ldo->phy_type_low || ldo->phy_type_high) {
		cfg->phy_type_low = pf->nvm_phy_type_lo &
				    cpu_to_le64(ldo->phy_type_low);
		cfg->phy_type_high = pf->nvm_phy_type_hi &
				     cpu_to_le64(ldo->phy_type_high);
	}
	cfg->link_fec_opt = ldo->fec_options;
	phy->curr_user_fec_req = ICE_FEC_AUTO;

	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
}

/**
 * ice_init_phy_user_cfg - Initialize the PHY user configuration
 * @pi: port info structure
 *
 * Initialize the current user PHY configuration, speed, FEC, and FC requested
 * mode to default. The PHY defaults are from get PHY capabilities topology
 * with media so call when media is first available. An error is returned if
 * called when media is not available. The PHY initialization completed state
 * is set here.
 *
 * These configurations are used when setting PHY
 * configuration. The user PHY configuration is updated on set PHY
 * configuration. Returns 0 on success, negative on failure
 */
static int ice_init_phy_user_cfg(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = pi->hw->back;
	int err;

	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
		return -EIO;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	if (ice_fw_supports_report_dflt_cfg(pi->hw))
		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
					  pcaps, NULL);
	else
		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					  pcaps, NULL);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
		goto err_out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);

	/* check if lenient mode is supported and enabled */
	if (ice_fw_supports_link_override(pi->hw) &&
	    !(pcaps->module_compliance_enforcement &
	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);

		/* if the FW supports default PHY configuration mode, then the
		 * driver does not have to apply link override settings. If
		 * not, initialize user PHY configuration with link override
		 * values
		 */
		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
			ice_init_phy_cfg_dflt_override(pi);
			goto out;
		}
	}

	/* if link default override is not enabled, set user flow control and
	 * FEC settings based on what get_phy_caps returned
	 */
	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
						      pcaps->link_fec_options);
	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);

out:
	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
err_out:
	kfree(pcaps);
	return err;
}
2084 * ice_configure_phy - configure PHY
2087 * Set the PHY configuration. If the current PHY configuration is the same as
2088 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
2089 * configure the based get PHY capabilities for topology with media.
2091 static int ice_configure_phy(struct ice_vsi *vsi)
2093 struct device *dev = ice_pf_to_dev(vsi->back);
2094 struct ice_port_info *pi = vsi->port_info;
2095 struct ice_aqc_get_phy_caps_data *pcaps;
2096 struct ice_aqc_set_phy_cfg_data *cfg;
2097 struct ice_phy_info *phy = &pi->phy;
2098 struct ice_pf *pf = vsi->back;
2101 /* Ensure we have media as we cannot configure a medialess port */
2102 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2105 ice_print_topo_conflict(vsi);
2107 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2108 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2111 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2112 return ice_force_phys_link_state(vsi, true);
2114 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2118 /* Get current PHY config */
2119 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2122 dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2127 /* If PHY enable link is configured and configuration has not changed,
2128 * there's nothing to do
2130 if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2131 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2134 /* Use PHY topology as baseline for configuration */
2135 memset(pcaps, 0, sizeof(*pcaps));
2136 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2137 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2138 pcaps, NULL);
2139 else
2140 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2141 pcaps, NULL);
2142 if (err) {
2143 dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2148 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2154 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2156 /* Speed - If default override pending, use curr_user_phy_cfg set in
2157 * ice_init_phy_cfg_dflt_override().
2159 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2160 vsi->back->state)) {
2161 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2162 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2164 u64 phy_low = 0, phy_high = 0;
2166 ice_update_phy_type(&phy_low, &phy_high,
2167 pi->phy.curr_user_speed_req);
2168 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2169 cfg->phy_type_high = pcaps->phy_type_high &
2170 cpu_to_le64(phy_high);
2173 /* Can't provide what was requested; use PHY capabilities */
2174 if (!cfg->phy_type_low && !cfg->phy_type_high) {
2175 cfg->phy_type_low = pcaps->phy_type_low;
2176 cfg->phy_type_high = pcaps->phy_type_high;
2180 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2182 /* Can't provide what was requested; use PHY capabilities */
2183 if (cfg->link_fec_opt !=
2184 (cfg->link_fec_opt & pcaps->link_fec_options)) {
2185 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2186 cfg->link_fec_opt = pcaps->link_fec_options;
2189 /* Flow Control - always supported; no need to check against capabilities */
2192 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2194 /* Enable link and link update */
2195 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2197 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2199 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2209 * ice_check_media_subtask - Check for media
2210 * @pf: pointer to PF struct
2212 * If media is available, then initialize the PHY user configuration if it
2213 * has not been done yet, and configure the PHY if the interface is up.
2215 static void ice_check_media_subtask(struct ice_pf *pf)
2217 struct ice_port_info *pi;
2218 struct ice_vsi *vsi;
2221 /* No need to check for media if it's already present */
2222 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2225 vsi = ice_get_main_vsi(pf);
2229 /* Refresh link info and check if media is present */
2230 pi = vsi->port_info;
2231 err = ice_update_link_info(pi);
2232 if (err)
2233 return;
2235 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2237 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2238 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2239 ice_init_phy_user_cfg(pi);
2241 /* PHY settings are reset on media insertion, reconfigure
2242 * PHY to preserve settings.
2244 if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2245 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2246 return;
2248 err = ice_configure_phy(vsi);
2249 if (!err)
2250 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2252 /* A Link Status Event will be generated; the event handler
2253 * will complete bringing the interface up
2259 * ice_service_task - manage and run subtasks
2260 * @work: pointer to work_struct contained by the PF struct
2262 static void ice_service_task(struct work_struct *work)
2264 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2265 unsigned long start_time = jiffies;
2269 /* process reset requests first */
2270 ice_reset_subtask(pf);
2272 /* bail if a reset/recovery cycle is pending or rebuild failed */
2273 if (ice_is_reset_in_progress(pf->state) ||
2274 test_bit(ICE_SUSPENDED, pf->state) ||
2275 test_bit(ICE_NEEDS_RESTART, pf->state)) {
2276 ice_service_task_complete(pf);
2280 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2281 struct iidc_event *event;
2283 event = kzalloc(sizeof(*event), GFP_KERNEL);
2284 if (event) {
2285 set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2286 /* report the entire OICR value to AUX driver */
2287 swap(event->reg, pf->oicr_err_reg);
2288 ice_send_event_to_aux(pf, event);
2293 if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
2294 /* Plug aux device per request */
2295 ice_plug_aux_dev(pf);
2297 /* Mark plugging as done but check whether unplug was
2298 * requested during ice_plug_aux_dev() call
2299 * (e.g. from ice_clear_rdma_cap()) and if so then unplug it.
2302 if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2303 ice_unplug_aux_dev(pf);
2306 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2307 struct iidc_event *event;
2309 event = kzalloc(sizeof(*event), GFP_KERNEL);
2310 if (event) {
2311 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2312 ice_send_event_to_aux(pf, event);
2317 ice_clean_adminq_subtask(pf);
2318 ice_check_media_subtask(pf);
2319 ice_check_for_hang_subtask(pf);
2320 ice_sync_fltr_subtask(pf);
2321 ice_handle_mdd_event(pf);
2322 ice_watchdog_subtask(pf);
2324 if (ice_is_safe_mode(pf)) {
2325 ice_service_task_complete(pf);
2329 ice_process_vflr_event(pf);
2330 ice_clean_mailboxq_subtask(pf);
2331 ice_clean_sbq_subtask(pf);
2332 ice_sync_arfs_fltrs(pf);
2333 ice_flush_fdir_ctx(pf);
2335 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2336 ice_service_task_complete(pf);
2338 /* If the tasks have taken longer than one service timer period
2339 * or there is more work to be done, reset the service timer to
2340 * schedule the service task now.
2342 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2343 test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2344 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2345 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2346 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2347 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2348 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2349 mod_timer(&pf->serv_tmr, jiffies);
2353 * ice_set_ctrlq_len - helper function to set controlq length
2354 * @hw: pointer to the HW instance
2356 static void ice_set_ctrlq_len(struct ice_hw *hw)
2358 hw->adminq.num_rq_entries = ICE_AQ_LEN;
2359 hw->adminq.num_sq_entries = ICE_AQ_LEN;
2360 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2361 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
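/* The mailbox receive queue below is sized with the PF_MBX_ARQLEN register
 * mask, i.e. the largest depth the register can express, presumably so
 * bursts of VF mailbox messages are less likely to overflow it (an
 * inference from the mask used, not stated in the original source).
 */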
2362 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2363 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2364 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2365 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2366 hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2367 hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2368 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2369 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2373 * ice_schedule_reset - schedule a reset
2374 * @pf: board private structure
2375 * @reset: reset being requested
2377 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2379 struct device *dev = ice_pf_to_dev(pf);
2381 /* bail out if earlier reset has failed */
2382 if (test_bit(ICE_RESET_FAILED, pf->state)) {
2383 dev_dbg(dev, "earlier reset has failed\n");
2386 /* bail if reset/recovery already in progress */
2387 if (ice_is_reset_in_progress(pf->state)) {
2388 dev_dbg(dev, "Reset already in progress\n");
2392 switch (reset) {
2393 case ICE_RESET_PFR:
2394 set_bit(ICE_PFR_REQ, pf->state);
2395 break;
2396 case ICE_RESET_CORER:
2397 set_bit(ICE_CORER_REQ, pf->state);
2398 break;
2399 case ICE_RESET_GLOBR:
2400 set_bit(ICE_GLOBR_REQ, pf->state);
2401 break;
2402 default:
2403 return -EINVAL;
2406 ice_service_task_schedule(pf);
2411 * ice_irq_affinity_notify - Callback for affinity changes
2412 * @notify: context as to what irq was changed
2413 * @mask: the new affinity mask
2415 * This is a callback function used by the irq_set_affinity_notifier function
2416 * so that we may register to receive changes to the irq affinity masks.
2419 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2420 const cpumask_t *mask)
2422 struct ice_q_vector *q_vector =
2423 container_of(notify, struct ice_q_vector, affinity_notify);
2425 cpumask_copy(&q_vector->affinity_mask, mask);
2429 * ice_irq_affinity_release - Callback for affinity notifier release
2430 * @ref: internal core kernel usage
2432 * This is a callback function used by the irq_set_affinity_notifier function
2433 * to inform the current notification subscriber that they will no longer
2434 * receive notifications.
2436 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2439 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2440 * @vsi: the VSI being configured
2442 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2444 struct ice_hw *hw = &vsi->back->hw;
2447 ice_for_each_q_vector(vsi, i)
2448 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2455 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2456 * @vsi: the VSI being configured
2457 * @basename: name for the vector
2459 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2461 int q_vectors = vsi->num_q_vectors;
2462 struct ice_pf *pf = vsi->back;
2463 int base = vsi->base_vector;
2470 dev = ice_pf_to_dev(pf);
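/* One IRQ is requested per q_vector below; q_vector->name encodes the
 * basename plus the queue type (TxRx/rx/tx) and an index, so each vector
 * is identifiable in /proc/interrupts (descriptive note).
 */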
2471 for (vector = 0; vector < q_vectors; vector++) {
2472 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2474 irq_num = pf->msix_entries[base + vector].vector;
2476 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2477 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2478 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2480 } else if (q_vector->rx.rx_ring) {
2481 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2482 "%s-%s-%d", basename, "rx", rx_int_idx++);
2483 } else if (q_vector->tx.tx_ring) {
2484 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2485 "%s-%s-%d", basename, "tx", tx_int_idx++);
2487 /* skip this unused q_vector */
2490 if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2491 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2492 IRQF_SHARED, q_vector->name,
2493 q_vector);
2494 else
2495 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2496 0, q_vector->name, q_vector);
2498 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2503 /* register for affinity change notifications */
2504 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2505 struct irq_affinity_notify *affinity_notify;
2507 affinity_notify = &q_vector->affinity_notify;
2508 affinity_notify->notify = ice_irq_affinity_notify;
2509 affinity_notify->release = ice_irq_affinity_release;
2510 irq_set_affinity_notifier(irq_num, affinity_notify);
2513 /* assign the mask for this irq */
2514 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2517 err = ice_set_cpu_rx_rmap(vsi);
2518 if (err) {
2519 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2520 vsi->vsi_num, ERR_PTR(err));
2524 vsi->irqs_ready = true;
2530 irq_num = pf->msix_entries[base + vector].vector;
2531 if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2532 irq_set_affinity_notifier(irq_num, NULL);
2533 irq_set_affinity_hint(irq_num, NULL);
2534 devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
2540 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2541 * @vsi: VSI to setup Tx rings used by XDP
2543 * Return 0 on success and negative value on error
2545 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2547 struct device *dev = ice_pf_to_dev(vsi->back);
2548 struct ice_tx_desc *tx_desc;
2551 ice_for_each_xdp_txq(vsi, i) {
2552 u16 xdp_q_idx = vsi->alloc_txq + i;
2553 struct ice_ring_stats *ring_stats;
2554 struct ice_tx_ring *xdp_ring;
2556 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2557 if (!xdp_ring)
2558 goto free_xdp_rings;
2560 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2561 if (!ring_stats) {
2562 ice_free_tx_ring(xdp_ring);
2563 goto free_xdp_rings;
2566 xdp_ring->ring_stats = ring_stats;
2567 xdp_ring->q_index = xdp_q_idx;
2568 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2569 xdp_ring->vsi = vsi;
2570 xdp_ring->netdev = NULL;
2571 xdp_ring->dev = dev;
2572 xdp_ring->count = vsi->num_tx_desc;
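/* next_dd marks where the DD (descriptor done) bit will next be polled and
 * next_rs where the RS (report status) bit will next be set; seeding both a
 * quarter of the ring ahead batches XDP Tx completion handling into
 * quarter-ring chunks (descriptive note, not from the original source).
 */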
2573 xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1;
2574 xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1;
2575 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2576 if (ice_setup_tx_ring(xdp_ring))
2577 goto free_xdp_rings;
2578 ice_set_ring_xdp(xdp_ring);
2579 spin_lock_init(&xdp_ring->tx_lock);
2580 for (j = 0; j < xdp_ring->count; j++) {
2581 tx_desc = ICE_TX_DESC(xdp_ring, j);
2582 tx_desc->cmd_type_offset_bsz = 0;
2589 for (; i >= 0; i--) {
2590 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2591 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2592 vsi->xdp_rings[i]->ring_stats = NULL;
2593 ice_free_tx_ring(vsi->xdp_rings[i]);
2600 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2601 * @vsi: VSI to set the bpf prog on
2602 * @prog: the bpf prog pointer
2604 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2606 struct bpf_prog *old_prog;
2609 old_prog = xchg(&vsi->xdp_prog, prog);
2611 bpf_prog_put(old_prog);
2613 ice_for_each_rxq(vsi, i)
2614 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
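/* The per-ring WRITE_ONCE above is expected to pair with a READ_ONCE of
 * rx_ring->xdp_prog in the Rx hot path, letting the program be swapped
 * without stopping traffic (descriptive note).
 */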
2618 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2619 * @vsi: VSI to bring up Tx rings used by XDP
2620 * @prog: bpf program that will be assigned to VSI
2622 * Return 0 on success and negative value on error
2624 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2626 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2627 int xdp_rings_rem = vsi->num_xdp_txq;
2628 struct ice_pf *pf = vsi->back;
2629 struct ice_qs_cfg xdp_qs_cfg = {
2630 .qs_mutex = &pf->avail_q_mutex,
2631 .pf_map = pf->avail_txqs,
2632 .pf_map_size = pf->max_pf_txqs,
2633 .q_count = vsi->num_xdp_txq,
2634 .scatter_count = ICE_MAX_SCATTER_TXQS,
2635 .vsi_map = vsi->txq_map,
2636 .vsi_map_offset = vsi->alloc_txq,
2637 .mapping_mode = ICE_VSI_MAP_CONTIG
2643 dev = ice_pf_to_dev(pf);
2644 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2645 sizeof(*vsi->xdp_rings), GFP_KERNEL);
2646 if (!vsi->xdp_rings)
2649 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2650 if (__ice_vsi_get_qs(&xdp_qs_cfg))
2651 goto err_map_xdp;
2653 if (static_key_enabled(&ice_xdp_locking_key))
2654 netdev_warn(vsi->netdev,
2655 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2657 if (ice_xdp_alloc_setup_rings(vsi))
2658 goto clear_xdp_rings;
2660 /* follow the logic from ice_vsi_map_rings_to_vectors */
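/* XDP rings are spread evenly across q_vectors; with illustrative numbers,
 * 5 XDP rings over 2 q_vectors gives DIV_ROUND_UP(5, 2) = 3 rings on the
 * first vector and the remaining 2 on the second.
 */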
2661 ice_for_each_q_vector(vsi, v_idx) {
2662 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2663 int xdp_rings_per_v, q_id, q_base;
2665 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2666 vsi->num_q_vectors - v_idx);
2667 q_base = vsi->num_xdp_txq - xdp_rings_rem;
2669 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2670 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2672 xdp_ring->q_vector = q_vector;
2673 xdp_ring->next = q_vector->tx.tx_ring;
2674 q_vector->tx.tx_ring = xdp_ring;
2676 xdp_rings_rem -= xdp_rings_per_v;
2679 ice_for_each_rxq(vsi, i) {
2680 if (static_key_enabled(&ice_xdp_locking_key)) {
2681 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2683 struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
2684 struct ice_tx_ring *ring;
2686 ice_for_each_tx_ring(ring, q_vector->tx) {
2687 if (ice_ring_is_xdp(ring)) {
2688 vsi->rx_rings[i]->xdp_ring = ring;
2693 ice_tx_xsk_pool(vsi, i);
2696 /* omit the scheduler update if in reset path; XDP queues will be
2697 * taken into account at the end of ice_vsi_rebuild, where
2698 * ice_cfg_vsi_lan is being called
2700 if (ice_is_reset_in_progress(pf->state))
2703 /* tell the Tx scheduler that right now we have additional queues */
2706 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2707 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2709 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2710 max_txqs);
2711 if (status) {
2712 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2714 goto clear_xdp_rings;
2717 /* assign the prog only when it's not already present on the VSI;
2718 * this flow is subject to both the ethtool -L and ndo_bpf paths;
2719 * a VSI rebuild that happens under ethtool -L can expose us to
2720 * bpf_prog refcount issues, as we would be swapping the same
2721 * bpf_prog pointer from vsi->xdp_prog and calling bpf_prog_put
2722 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2723 * this is not harmful as dev_xdp_install bumps the refcount
2724 * before calling the op exposed by the driver;
2726 if (!ice_is_xdp_ena_vsi(vsi))
2727 ice_vsi_assign_bpf_prog(vsi, prog);
2731 ice_for_each_xdp_txq(vsi, i)
2732 if (vsi->xdp_rings[i]) {
2733 kfree_rcu(vsi->xdp_rings[i], rcu);
2734 vsi->xdp_rings[i] = NULL;
2738 mutex_lock(&pf->avail_q_mutex);
2739 ice_for_each_xdp_txq(vsi, i) {
2740 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2741 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2743 mutex_unlock(&pf->avail_q_mutex);
2745 devm_kfree(dev, vsi->xdp_rings);
2750 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2751 * @vsi: VSI to remove XDP rings
2753 * Detach XDP rings from irq vectors, clean up the PF bitmap and free resources
2756 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2758 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2759 struct ice_pf *pf = vsi->back;
2762 /* q_vectors are freed in the reset path so there's no point in detaching
2763 * rings; when a rebuild is triggered outside of reset, the reset bits in
2764 * pf->state won't be set, so additionally check the first q_vector against NULL
2767 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2770 ice_for_each_q_vector(vsi, v_idx) {
2771 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2772 struct ice_tx_ring *ring;
2774 ice_for_each_tx_ring(ring, q_vector->tx)
2775 if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2778 /* restore the value of last node prior to XDP setup */
2779 q_vector->tx.tx_ring = ring;
2783 mutex_lock(&pf->avail_q_mutex);
2784 ice_for_each_xdp_txq(vsi, i) {
2785 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2786 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2788 mutex_unlock(&pf->avail_q_mutex);
2790 ice_for_each_xdp_txq(vsi, i)
2791 if (vsi->xdp_rings[i]) {
2792 if (vsi->xdp_rings[i]->desc) {
2794 ice_free_tx_ring(vsi->xdp_rings[i]);
2796 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2797 vsi->xdp_rings[i]->ring_stats = NULL;
2798 kfree_rcu(vsi->xdp_rings[i], rcu);
2799 vsi->xdp_rings[i] = NULL;
2802 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2803 vsi->xdp_rings = NULL;
2805 if (static_key_enabled(&ice_xdp_locking_key))
2806 static_branch_dec(&ice_xdp_locking_key);
2808 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2811 ice_vsi_assign_bpf_prog(vsi, NULL);
2813 /* notify Tx scheduler that we destroyed XDP queues and bring
2814 * back the old number of child nodes
2816 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2817 max_txqs[i] = vsi->num_txq;
2819 /* change number of XDP Tx queues to 0 */
2820 vsi->num_xdp_txq = 0;
2822 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2827 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2828 * @vsi: VSI to schedule napi on
2830 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2834 ice_for_each_rxq(vsi, i) {
2835 struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2837 if (rx_ring->xsk_pool)
2838 napi_schedule(&rx_ring->q_vector->napi);
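/* Only rings backed by an AF_XDP socket pool are kicked; their NAPI
 * context must run so pending work is processed with the new XDP
 * configuration (descriptive note).
 */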
2843 * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
2844 * @vsi: VSI to determine the count of XDP Tx qs
2846 * returns 0 if the available Tx queue count is at least half the CPU count, -ENOMEM otherwise
2849 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2851 u16 avail = ice_get_avail_txq_count(vsi->back);
2852 u16 cpus = num_possible_cpus();
2854 if (avail < cpus / 2)
2857 vsi->num_xdp_txq = min_t(u16, avail, cpus);
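/* If not every CPU can own an XDP Tx ring, rings end up shared, and the
 * static key below moves XDP_TX/XDP_REDIRECT onto the spinlock-protected
 * path (the tx_lock initialized during XDP ring setup; descriptive note).
 */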
2859 if (vsi->num_xdp_txq < cpus)
2860 static_branch_inc(&ice_xdp_locking_key);
2866 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2867 * @vsi: VSI to setup XDP for
2868 * @prog: XDP program
2869 * @extack: netlink extended ack
2872 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2873 struct netlink_ext_ack *extack)
2875 int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2876 bool if_running = netif_running(vsi->netdev);
2877 int ret = 0, xdp_ring_err = 0;
2879 if (frame_size > vsi->rx_buf_len) {
2880 NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2884 /* need to stop netdev while setting up the program for Rx rings */
2885 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2886 ret = ice_down(vsi);
2887 if (ret) {
2888 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2893 if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2894 xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
2896 NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
2898 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2899 if (xdp_ring_err)
2900 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2902 /* reallocate Rx queues that are used for zero-copy */
2903 xdp_ring_err = ice_realloc_zc_buf(vsi, true);
2904 if (xdp_ring_err)
2905 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
2906 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2907 xdp_ring_err = ice_destroy_xdp_rings(vsi);
2908 if (xdp_ring_err)
2909 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2910 /* reallocate Rx queues that were used for zero-copy */
2911 xdp_ring_err = ice_realloc_zc_buf(vsi, false);
2912 if (xdp_ring_err)
2913 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
2915 /* safe to call even when prog == vsi->xdp_prog as
2916 * dev_xdp_install in net/core/dev.c incremented prog's
2917 * refcount so corresponding bpf_prog_put won't cause it to be prematurely freed
2920 ice_vsi_assign_bpf_prog(vsi, prog);
2927 ice_vsi_rx_napi_schedule(vsi);
2929 return (ret || xdp_ring_err) ? -ENOMEM : 0;
2933 * ice_xdp_safe_mode - XDP handler for safe mode
2937 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2938 struct netdev_bpf *xdp)
2940 NL_SET_ERR_MSG_MOD(xdp->extack,
2941 "Please provide working DDP firmware package in order to use XDP\n"
2942 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2947 * ice_xdp - implements XDP handler
2951 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2953 struct ice_netdev_priv *np = netdev_priv(dev);
2954 struct ice_vsi *vsi = np->vsi;
2956 if (vsi->type != ICE_VSI_PF) {
2957 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2961 switch (xdp->command) {
2962 case XDP_SETUP_PROG:
2963 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2964 case XDP_SETUP_XSK_POOL:
2965 return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2973 * ice_ena_misc_vector - enable the non-queue interrupts
2974 * @pf: board private structure
2976 static void ice_ena_misc_vector(struct ice_pf *pf)
2978 struct ice_hw *hw = &pf->hw;
2981 /* Disable anti-spoof detection interrupt to prevent spurious event
2982 * interrupts during a function reset. Anti-spoof functionality is still supported.
2985 val = rd32(hw, GL_MDCK_TX_TDPU);
2986 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2987 wr32(hw, GL_MDCK_TX_TDPU, val);
2989 /* clear things first */
2990 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
2991 rd32(hw, PFINT_OICR); /* read to clear */
2993 val = (PFINT_OICR_ECC_ERR_M |
2994 PFINT_OICR_MAL_DETECT_M |
2996 PFINT_OICR_PCI_EXCEPTION_M |
2998 PFINT_OICR_HMC_ERR_M |
2999 PFINT_OICR_PE_PUSH_M |
3000 PFINT_OICR_PE_CRITERR_M);
3002 wr32(hw, PFINT_OICR_ENA, val);
3004 /* SW_ITR_IDX = 0, but don't change INTENA */
3005 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
3006 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3010 * ice_misc_intr - misc interrupt handler
3011 * @irq: interrupt number
3012 * @data: pointer to the PF structure
3014 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3016 struct ice_pf *pf = (struct ice_pf *)data;
3017 struct ice_hw *hw = &pf->hw;
3018 irqreturn_t ret = IRQ_NONE;
3022 dev = ice_pf_to_dev(pf);
3023 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3024 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3025 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3027 oicr = rd32(hw, PFINT_OICR);
3028 ena_mask = rd32(hw, PFINT_OICR_ENA);
3030 if (oicr & PFINT_OICR_SWINT_M) {
3031 ena_mask &= ~PFINT_OICR_SWINT_M;
3035 if (oicr & PFINT_OICR_MAL_DETECT_M) {
3036 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3037 set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3039 if (oicr & PFINT_OICR_VFLR_M) {
3040 /* disable any further VFLR event notifications */
3041 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3042 u32 reg = rd32(hw, PFINT_OICR_ENA);
3044 reg &= ~PFINT_OICR_VFLR_M;
3045 wr32(hw, PFINT_OICR_ENA, reg);
3047 ena_mask &= ~PFINT_OICR_VFLR_M;
3048 set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3052 if (oicr & PFINT_OICR_GRST_M) {
3055 /* we have a reset warning */
3056 ena_mask &= ~PFINT_OICR_GRST_M;
3057 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
3058 GLGEN_RSTAT_RESET_TYPE_S;
3060 if (reset == ICE_RESET_CORER)
3062 else if (reset == ICE_RESET_GLOBR)
3064 else if (reset == ICE_RESET_EMPR)
3067 dev_dbg(dev, "Invalid reset type %d\n", reset);
3069 /* If a reset cycle isn't already in progress, we set a bit in
3070 * pf->state so that the service task can start a reset/rebuild.
3072 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3073 if (reset == ICE_RESET_CORER)
3074 set_bit(ICE_CORER_RECV, pf->state);
3075 else if (reset == ICE_RESET_GLOBR)
3076 set_bit(ICE_GLOBR_RECV, pf->state);
3078 set_bit(ICE_EMPR_RECV, pf->state);
3080 /* There are a couple of different bits at play here.
3081 * hw->reset_ongoing indicates whether the hardware is
3082 * in reset. This is set to true when a reset interrupt
3083 * is received and set back to false after the driver
3084 * has determined that the hardware is out of reset.
3086 * ICE_RESET_OICR_RECV in pf->state indicates
3087 * that a post reset rebuild is required before the
3088 * driver is operational again. This is set above.
3090 * As this is the start of the reset/rebuild cycle, set
3091 * both to indicate that.
3093 hw->reset_ongoing = true;
3097 if (oicr & PFINT_OICR_TSYN_TX_M) {
3098 ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3099 if (!hw->reset_ongoing)
3100 ret = IRQ_WAKE_THREAD;
3103 if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3104 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3105 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3107 /* Save EVENTs from GLTSYN register */
3108 pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
3109 GLTSYN_STAT_EVENT1_M |
3110 GLTSYN_STAT_EVENT2_M);
3111 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3112 kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
3115 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3116 if (oicr & ICE_AUX_CRIT_ERR) {
3117 pf->oicr_err_reg |= oicr;
3118 set_bit(ICE_AUX_ERR_PENDING, pf->state);
3119 ena_mask &= ~ICE_AUX_CRIT_ERR;
3122 /* Report any remaining unexpected interrupts */
3125 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3126 /* If a critical error is pending there is no choice but to reset the device. */
3129 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3130 PFINT_OICR_ECC_ERR_M)) {
3131 set_bit(ICE_PFR_REQ, pf->state);
3132 ice_service_task_schedule(pf);
3138 ice_service_task_schedule(pf);
3139 ice_irq_dynamic_ena(hw, NULL, NULL);
3145 * ice_misc_intr_thread_fn - misc interrupt thread function
3146 * @irq: interrupt number
3147 * @data: pointer to the PF structure
3149 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3151 struct ice_pf *pf = data;
3153 if (ice_is_reset_in_progress(pf->state))
3156 while (!ice_ptp_process_ts(pf))
3157 usleep_range(50, 100);
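/* ice_ptp_process_ts() reports whether all outstanding Tx timestamps have
 * been handled; the loop above backs off briefly and retries until the
 * queue is drained (descriptive note).
 */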
3163 * ice_dis_ctrlq_interrupts - disable control queue interrupts
3164 * @hw: pointer to HW structure
3166 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3168 /* disable Admin queue Interrupt causes */
3169 wr32(hw, PFINT_FW_CTL,
3170 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3172 /* disable Mailbox queue Interrupt causes */
3173 wr32(hw, PFINT_MBX_CTL,
3174 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3176 wr32(hw, PFINT_SB_CTL,
3177 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3179 /* disable Control queue Interrupt causes */
3180 wr32(hw, PFINT_OICR_CTL,
3181 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3187 * ice_free_irq_msix_misc - Unroll misc vector setup
3188 * @pf: board private structure
3190 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3192 struct ice_hw *hw = &pf->hw;
3194 ice_dis_ctrlq_interrupts(hw);
3196 /* disable OICR interrupt */
3197 wr32(hw, PFINT_OICR_ENA, 0);
3200 if (pf->msix_entries) {
3201 synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
3202 devm_free_irq(ice_pf_to_dev(pf),
3203 pf->msix_entries[pf->oicr_idx].vector, pf);
3206 pf->num_avail_sw_msix += 1;
3207 ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
3211 * ice_ena_ctrlq_interrupts - enable control queue interrupts
3212 * @hw: pointer to HW structure
3213 * @reg_idx: HW vector index to associate the control queue interrupts with
3215 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3219 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3220 PFINT_OICR_CTL_CAUSE_ENA_M);
3221 wr32(hw, PFINT_OICR_CTL, val);
3223 /* enable Admin queue Interrupt causes */
3224 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3225 PFINT_FW_CTL_CAUSE_ENA_M);
3226 wr32(hw, PFINT_FW_CTL, val);
3228 /* enable Mailbox queue Interrupt causes */
3229 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3230 PFINT_MBX_CTL_CAUSE_ENA_M);
3231 wr32(hw, PFINT_MBX_CTL, val);
3233 /* enable Sideband queue Interrupt causes */
3234 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3235 PFINT_SB_CTL_CAUSE_ENA_M);
3236 wr32(hw, PFINT_SB_CTL, val);
3242 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3243 * @pf: board private structure
3245 * This sets up the handler for MSIX 0, which is used to manage the
3246 * non-queue interrupts, e.g. AdminQ and errors. This is not used
3247 * when in MSI or Legacy interrupt mode.
3249 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3251 struct device *dev = ice_pf_to_dev(pf);
3252 struct ice_hw *hw = &pf->hw;
3253 int oicr_idx, err = 0;
3255 if (!pf->int_name[0])
3256 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3257 dev_driver_string(dev), dev_name(dev));
3259 /* Do not request IRQ but do enable OICR interrupt since settings are
3260 * lost during reset. Note that this function is called only during
3261 * rebuild path and not while reset is in progress.
3263 if (ice_is_reset_in_progress(pf->state))
3266 /* reserve one vector in irq_tracker for misc interrupts */
3267 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3271 pf->num_avail_sw_msix -= 1;
3272 pf->oicr_idx = (u16)oicr_idx;
3274 err = devm_request_threaded_irq(dev,
3275 pf->msix_entries[pf->oicr_idx].vector,
3276 ice_misc_intr, ice_misc_intr_thread_fn,
3277 0, pf->int_name, pf);
3278 if (err) {
3279 dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3281 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3282 pf->num_avail_sw_msix += 1;
3287 ice_ena_misc_vector(pf);
3289 ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
3290 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
3291 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
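/* Judging by the ICE_ITR_8K macro name, this appears to throttle the
 * misc/OICR vector to roughly 8K interrupts per second (an inference,
 * not stated in the original source).
 */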
3294 ice_irq_dynamic_ena(hw, NULL, NULL);
3300 * ice_napi_add - register NAPI handler for the VSI
3301 * @vsi: VSI for which NAPI handler is to be registered
3303 * This function is only called in the driver's load path. Registering the NAPI
3304 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3305 * reset/rebuild, etc.)
3307 static void ice_napi_add(struct ice_vsi *vsi)
3314 ice_for_each_q_vector(vsi, v_idx)
3315 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3320 * ice_set_ops - set netdev and ethtool ops for the given netdev
3321 * @netdev: netdev instance
3323 static void ice_set_ops(struct net_device *netdev)
3325 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3327 if (ice_is_safe_mode(pf)) {
3328 netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3329 ice_set_ethtool_safe_mode_ops(netdev);
3333 netdev->netdev_ops = &ice_netdev_ops;
3334 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3335 ice_set_ethtool_ops(netdev);
3339 * ice_set_netdev_features - set features for the given netdev
3340 * @netdev: netdev instance
3342 static void ice_set_netdev_features(struct net_device *netdev)
3344 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3345 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3346 netdev_features_t csumo_features;
3347 netdev_features_t vlano_features;
3348 netdev_features_t dflt_features;
3349 netdev_features_t tso_features;
3351 if (ice_is_safe_mode(pf)) {
3353 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3354 netdev->hw_features = netdev->features;
3358 dflt_features = NETIF_F_SG |
3363 csumo_features = NETIF_F_RXCSUM |
3368 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3369 NETIF_F_HW_VLAN_CTAG_TX |
3370 NETIF_F_HW_VLAN_CTAG_RX;
3372 /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3374 vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3376 tso_features = NETIF_F_TSO |
3380 NETIF_F_GSO_UDP_TUNNEL |
3381 NETIF_F_GSO_GRE_CSUM |
3382 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3383 NETIF_F_GSO_PARTIAL |
3384 NETIF_F_GSO_IPXIP4 |
3385 NETIF_F_GSO_IPXIP6 |
3388 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3389 NETIF_F_GSO_GRE_CSUM;
3390 /* set features that user can change */
3391 netdev->hw_features = dflt_features | csumo_features |
3392 vlano_features | tso_features;
3394 /* add support for HW_CSUM on packets with MPLS header */
3395 netdev->mpls_features = NETIF_F_HW_CSUM |
3399 /* enable features */
3400 netdev->features |= netdev->hw_features;
3402 netdev->hw_features |= NETIF_F_HW_TC;
3403 netdev->hw_features |= NETIF_F_LOOPBACK;
3405 /* encap and VLAN devices inherit default, csumo and tso features */
3406 netdev->hw_enc_features |= dflt_features | csumo_features |
3408 netdev->vlan_features |= dflt_features | csumo_features |
3411 /* advertise support but don't enable by default since only one type of
3412 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3413 * type turns on the other has to be turned off. This is enforced by the
3414 * ice_fix_features() ndo callback.
3417 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3418 NETIF_F_HW_VLAN_STAG_TX;
3420 /* Leave CRC / FCS stripping enabled by default, but allow the value to
3421 * be changed at runtime
3423 netdev->hw_features |= NETIF_F_RXFCS;
3427 * ice_cfg_netdev - Allocate, configure and register a netdev
3428 * @vsi: the VSI associated with the new netdev
3430 * Returns 0 on success, negative value on failure
3432 static int ice_cfg_netdev(struct ice_vsi *vsi)
3434 struct ice_netdev_priv *np;
3435 struct net_device *netdev;
3436 u8 mac_addr[ETH_ALEN];
3438 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
3443 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3444 vsi->netdev = netdev;
3445 np = netdev_priv(netdev);
3448 ice_set_netdev_features(netdev);
3450 ice_set_ops(netdev);
3452 if (vsi->type == ICE_VSI_PF) {
3453 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
3454 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
3455 eth_hw_addr_set(netdev, mac_addr);
3456 ether_addr_copy(netdev->perm_addr, mac_addr);
3459 netdev->priv_flags |= IFF_UNICAST_FLT;
3461 /* Setup netdev TC information */
3462 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
3464 /* set up the watchdog timeout value to be 5 seconds */
3465 netdev->watchdog_timeo = 5 * HZ;
3467 netdev->min_mtu = ETH_MIN_MTU;
3468 netdev->max_mtu = ICE_MAX_MTU;
3474 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3475 * @lut: Lookup table
3476 * @rss_table_size: Lookup table size
3477 * @rss_size: Range of queue number for hashing
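*
* Entries are assigned round-robin, e.g. with rss_table_size = 8 and
* rss_size = 3 (illustrative values) the LUT becomes 0 1 2 0 1 2 0 1.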
3479 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3483 for (i = 0; i < rss_table_size; i++)
3484 lut[i] = i % rss_size;
3488 * ice_pf_vsi_setup - Set up a PF VSI
3489 * @pf: board private structure
3490 * @pi: pointer to the port_info instance
3492 * Returns pointer to the successfully allocated VSI software struct
3493 * on success, otherwise returns NULL on failure.
3495 static struct ice_vsi *
3496 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3498 return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL);
3501 static struct ice_vsi *
3502 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3503 struct ice_channel *ch)
3505 return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch);
3509 * ice_ctrl_vsi_setup - Set up a control VSI
3510 * @pf: board private structure
3511 * @pi: pointer to the port_info instance
3513 * Returns pointer to the successfully allocated VSI software struct
3514 * on success, otherwise returns NULL on failure.
3516 static struct ice_vsi *
3517 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3519 return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL);
3523 * ice_lb_vsi_setup - Set up a loopback VSI
3524 * @pf: board private structure
3525 * @pi: pointer to the port_info instance
3527 * Returns pointer to the successfully allocated VSI software struct
3528 * on success, otherwise returns NULL on failure.
3531 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3533 return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL);
3537 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3538 * @netdev: network interface to be adjusted
3540 * @vid: VLAN ID to be added
3542 * net_device_ops implementation for adding VLAN IDs
3545 ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3547 struct ice_netdev_priv *np = netdev_priv(netdev);
3548 struct ice_vsi_vlan_ops *vlan_ops;
3549 struct ice_vsi *vsi = np->vsi;
3550 struct ice_vlan vlan;
3553 /* VLAN 0 is added by default during load/reset */
3557 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3558 usleep_range(1000, 2000);
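/* The ICE_CFG_BUSY bit serializes this VLAN update against other
 * configuration paths touching the VSI; the loop above busy-waits with a
 * short sleep until the bit is acquired (descriptive note).
 */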
3560 /* Add multicast promisc rule for the VLAN ID to be added if
3561 * all-multicast is currently enabled.
3563 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3564 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3565 ICE_MCAST_VLAN_PROMISC_BITS,
3571 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3573 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3574 * packets aren't pruned by the device's internal switch on Rx
3576 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3577 ret = vlan_ops->add_vlan(vsi, &vlan);
3581 /* If all-multicast is currently enabled and this VLAN ID is the only one
3582 * besides VLAN-0, we have to update the look-up type of the multicast
3583 * promisc rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3585 if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3586 ice_vsi_num_non_zero_vlans(vsi) == 1) {
3587 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3588 ICE_MCAST_PROMISC_BITS, 0);
3589 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3590 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3594 clear_bit(ICE_CFG_BUSY, vsi->state);
3600 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3601 * @netdev: network interface to be adjusted
3603 * @vid: VLAN ID to be removed
3605 * net_device_ops implementation for removing VLAN IDs
3608 ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3610 struct ice_netdev_priv *np = netdev_priv(netdev);
3611 struct ice_vsi_vlan_ops *vlan_ops;
3612 struct ice_vsi *vsi = np->vsi;
3613 struct ice_vlan vlan;
3616 /* don't allow removal of VLAN 0 */
3620 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3621 usleep_range(1000, 2000);
3623 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3624 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3626 netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3628 vsi->current_netdev_flags |= IFF_ALLMULTI;
3631 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3633 /* Make sure VLAN delete is successful before updating VLAN information */
3636 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3637 ret = vlan_ops->del_vlan(vsi, &vlan);
3641 /* Remove multicast promisc rule for the removed VLAN ID if
3642 * all-multicast is enabled.
3644 if (vsi->current_netdev_flags & IFF_ALLMULTI)
3645 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3646 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3648 if (!ice_vsi_has_non_zero_vlans(vsi)) {
3649 /* Update look-up type of multicast promisc rule for VLAN 0
3650 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3651 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3653 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3654 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3655 ICE_MCAST_VLAN_PROMISC_BITS,
3657 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3658 ICE_MCAST_PROMISC_BITS, 0);
3663 clear_bit(ICE_CFG_BUSY, vsi->state);
3669 * ice_rep_indr_tc_block_unbind
3670 * @cb_priv: indirection block private data
3672 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3674 struct ice_indr_block_priv *indr_priv = cb_priv;
3676 list_del(&indr_priv->list);
3681 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3682 * @vsi: VSI struct which has the netdev
3684 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3686 struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3688 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3689 ice_rep_indr_tc_block_unbind);
3693 * ice_tc_indir_block_remove - clean indirect TC block notifications
3696 static void ice_tc_indir_block_remove(struct ice_pf *pf)
3698 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
3703 ice_tc_indir_block_unregister(pf_vsi);
3707 * ice_tc_indir_block_register - Register TC indirect block notifications
3708 * @vsi: VSI struct which has the netdev
3710 * Returns 0 on success, negative value on failure
3712 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3714 struct ice_netdev_priv *np;
3716 if (!vsi || !vsi->netdev)
3719 np = netdev_priv(vsi->netdev);
3721 INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3722 return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3726 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3727 * @pf: board private structure
3729 * Returns 0 on success, negative value on failure
3731 static int ice_setup_pf_sw(struct ice_pf *pf)
3733 struct device *dev = ice_pf_to_dev(pf);
3734 bool dvm = ice_is_dvm_ena(&pf->hw);
3735 struct ice_vsi *vsi;
3738 if (ice_is_reset_in_progress(pf->state))
3741 status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
3745 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3749 /* init channel list */
3750 INIT_LIST_HEAD(&vsi->ch_list);
3752 status = ice_cfg_netdev(vsi);
3753 if (status)
3754 goto unroll_vsi_setup;
3755 /* netdev has to be configured before setting frame size */
3756 ice_vsi_cfg_frame_size(vsi);
3758 /* init indirect block notifications */
3759 status = ice_tc_indir_block_register(vsi);
3760 if (status) {
3761 dev_err(dev, "Failed to register netdev notifier\n");
3762 goto unroll_cfg_netdev;
3765 /* Setup DCB netlink interface */
3766 ice_dcbnl_setup(vsi);
3768 /* registering the NAPI handler requires both the queues and
3769 * netdev to be created, which are done in ice_pf_vsi_setup()
3770 * and ice_cfg_netdev() respectively
3774 status = ice_init_mac_fltr(pf);
3775 if (status)
3776 goto unroll_napi_add;
3781 ice_tc_indir_block_unregister(vsi);
3786 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3787 free_netdev(vsi->netdev);
3793 ice_vsi_release(vsi);
3798 * ice_get_avail_q_count - Get count of queues available (not in use)
3799 * @pf_qmap: bitmap to get queue use count from
3800 * @lock: pointer to a mutex that protects access to pf_qmap
3801 * @size: size of the bitmap
3804 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3810 for_each_clear_bit(bit, pf_qmap, size)
3818 * ice_get_avail_txq_count - Get count of available Tx queues
3819 * @pf: pointer to an ice_pf instance
3821 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3823 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3828 * ice_get_avail_rxq_count - Get count of available Rx queues
3829 * @pf: pointer to an ice_pf instance
3831 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3833 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3838 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3839 * @pf: board private structure to initialize
3841 static void ice_deinit_pf(struct ice_pf *pf)
3843 ice_service_task_stop(pf);
3844 mutex_destroy(&pf->adev_mutex);
3845 mutex_destroy(&pf->sw_mutex);
3846 mutex_destroy(&pf->tc_mutex);
3847 mutex_destroy(&pf->avail_q_mutex);
3848 mutex_destroy(&pf->vfs.table_lock);
3850 if (pf->avail_txqs) {
3851 bitmap_free(pf->avail_txqs);
3852 pf->avail_txqs = NULL;
3855 if (pf->avail_rxqs) {
3856 bitmap_free(pf->avail_rxqs);
3857 pf->avail_rxqs = NULL;
3861 ptp_clock_unregister(pf->ptp.clock);
3865 * ice_set_pf_caps - set the PF's capability flags
3866 * @pf: pointer to the PF instance
3868 static void ice_set_pf_caps(struct ice_pf *pf)
3870 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3872 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3873 if (func_caps->common_cap.rdma)
3874 set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3875 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3876 if (func_caps->common_cap.dcb)
3877 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3878 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3879 if (func_caps->common_cap.sr_iov_1_1) {
3880 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3881 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
3884 clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3885 if (func_caps->common_cap.rss_table_size)
3886 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3888 clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3889 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3892 /* ctrl_vsi_idx will be set to a valid value when flow director
3893 * is set up by ice_init_fdir
3895 pf->ctrl_vsi_idx = ICE_NO_VSI;
3896 set_bit(ICE_FLAG_FD_ENA, pf->flags);
3897 /* force guaranteed filter pool for PF */
3898 ice_alloc_fd_guar_item(&pf->hw, &unused,
3899 func_caps->fd_fltr_guar);
3900 /* force shared filter pool for PF */
3901 ice_alloc_fd_shrd_item(&pf->hw, &unused,
3902 func_caps->fd_fltr_best_effort);
3905 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3906 if (func_caps->common_cap.ieee_1588)
3907 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3909 pf->max_pf_txqs = func_caps->common_cap.num_txq;
3910 pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3914 * ice_init_pf - Initialize general software structures (struct ice_pf)
3915 * @pf: board private structure to initialize
3917 static int ice_init_pf(struct ice_pf *pf)
3919 ice_set_pf_caps(pf);
3921 mutex_init(&pf->sw_mutex);
3922 mutex_init(&pf->tc_mutex);
3923 mutex_init(&pf->adev_mutex);
3925 INIT_HLIST_HEAD(&pf->aq_wait_list);
3926 spin_lock_init(&pf->aq_wait_lock);
3927 init_waitqueue_head(&pf->aq_wait_queue);
3929 init_waitqueue_head(&pf->reset_wait_queue);
3931 /* setup service timer and periodic service task */
3932 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3933 pf->serv_tmr_period = HZ;
3934 INIT_WORK(&pf->serv_task, ice_service_task);
3935 clear_bit(ICE_SERVICE_SCHED, pf->state);
3937 mutex_init(&pf->avail_q_mutex);
3938 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3939 if (!pf->avail_txqs)
3942 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3943 if (!pf->avail_rxqs) {
3944 bitmap_free(pf->avail_txqs);
3945 pf->avail_txqs = NULL;
3949 mutex_init(&pf->vfs.table_lock);
3950 hash_init(pf->vfs.table);
3956 * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
3957 * @pf: board private structure
3958 * @v_remain: number of remaining MSI-X vectors to be distributed
3960 * Reduce the usage of MSI-X vectors when the entire request cannot be fulfilled.
3961 * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of
3962 * remaining vectors.
3964 static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
3968 if (!ice_is_rdma_ena(pf)) {
3969 pf->num_lan_msix = v_remain;
3973 /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
3974 v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
3976 if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
3977 dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
3978 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3980 pf->num_rdma_msix = 0;
3981 pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3982 } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
3983 (v_remain - v_rdma < v_rdma)) {
3984 /* Support minimum RDMA and give remaining vectors to LAN MSIX */
3985 pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
3986 pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
3988 /* Split remaining MSIX with RDMA after accounting for AEQ MSIX
3990 pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
3991 ICE_RDMA_NUM_AEQ_MSIX;
3992 pf->num_lan_msix = v_remain - pf->num_rdma_msix;
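/* With illustrative numbers: v_remain = 64 and ICE_RDMA_NUM_AEQ_MSIX = 4
 * give RDMA (64 - 4) / 2 + 4 = 34 vectors and LAN the remaining 30
 * (descriptive note, not from the original source).
 */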
3997 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3998 * @pf: board private structure
4000 * Compute the number of MSIX vectors wanted and request from the OS. Adjust
4001 * device usage if there are not enough vectors. Return the number of vectors
4002 * reserved or negative on failure.
4004 static int ice_ena_msix_range(struct ice_pf *pf)
4006 int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
4007 struct device *dev = ice_pf_to_dev(pf);
4010 hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
4011 num_cpus = num_online_cpus();
4013 /* LAN miscellaneous handler */
4014 v_other = ICE_MIN_LAN_OICR_MSIX;
4017 if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
4018 v_other += ICE_FDIR_MSIX;
4021 v_other += ICE_ESWITCH_MSIX;
4026 pf->num_lan_msix = num_cpus;
4027 v_wanted += pf->num_lan_msix;
4029 /* RDMA auxiliary driver */
4030 if (ice_is_rdma_ena(pf)) {
4031 pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
4032 v_wanted += pf->num_rdma_msix;
4035 if (v_wanted > hw_num_msix) {
4038 dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
4039 v_wanted, hw_num_msix);
4041 if (hw_num_msix < ICE_MIN_MSIX) {
4046 v_remain = hw_num_msix - v_other;
4047 if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
4048 v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
4049 v_remain = ICE_MIN_LAN_TXRX_MSIX;
4052 ice_reduce_msix_usage(pf, v_remain);
4053 v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
4055 dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
4057 if (ice_is_rdma_ena(pf))
4058 dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
4062 pf->msix_entries = devm_kcalloc(dev, v_wanted,
4063 sizeof(*pf->msix_entries), GFP_KERNEL);
4064 if (!pf->msix_entries) {
4069 for (i = 0; i < v_wanted; i++)
4070 pf->msix_entries[i].entry = i;
4072 /* actually reserve the vectors */
4073 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
4074 ICE_MIN_MSIX, v_wanted);
4076 dev_err(dev, "unable to reserve MSI-X vectors\n");
4081 if (v_actual < v_wanted) {
4082 dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
4083 v_wanted, v_actual);
4085 if (v_actual < ICE_MIN_MSIX) {
4086 /* error if we can't get minimum vectors */
4087 pci_disable_msix(pf->pdev);
4091 int v_remain = v_actual - v_other;
4093 if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
4094 v_remain = ICE_MIN_LAN_TXRX_MSIX;
4096 ice_reduce_msix_usage(pf, v_remain);
4098 dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
4101 if (ice_is_rdma_ena(pf))
4102 dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
4110 devm_kfree(dev, pf->msix_entries);
4113 pf->num_rdma_msix = 0;
4114 pf->num_lan_msix = 0;
4119 * ice_dis_msix - Disable MSI-X interrupt setup in OS
4120 * @pf: board private structure
4122 static void ice_dis_msix(struct ice_pf *pf)
4124 pci_disable_msix(pf->pdev);
4125 devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
4126 pf->msix_entries = NULL;
4130 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
4131 * @pf: board private structure
4133 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
4137 if (pf->irq_tracker) {
4138 devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
4139 pf->irq_tracker = NULL;
4144 * ice_init_interrupt_scheme - Determine proper interrupt scheme
4145 * @pf: board private structure to initialize
4147 static int ice_init_interrupt_scheme(struct ice_pf *pf)
4151 vectors = ice_ena_msix_range(pf);
4156 /* set up vector assignment tracking */
4157 pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
4158 struct_size(pf->irq_tracker, list, vectors),
4160 if (!pf->irq_tracker) {
4165 /* populate SW interrupts pool with number of OS granted IRQs. */
4166 pf->num_avail_sw_msix = (u16)vectors;
4167 pf->irq_tracker->num_entries = (u16)vectors;
4168 pf->irq_tracker->end = pf->irq_tracker->num_entries;
4174 * ice_is_wol_supported - check if WoL is supported
4175 * @hw: pointer to hardware info
4177 * Check if WoL is supported based on the HW configuration.
4178 * Returns true if NVM supports and enables WoL for this port, false otherwise
4180 bool ice_is_wol_supported(struct ice_hw *hw)
4184 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
4185 * word) indicates WoL is not supported on the corresponding PF ID.
4187 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
4190 return !(BIT(hw->port_info->lport) & wol_ctrl);
4194 * ice_vsi_recfg_qs - Change the number of queues on a VSI
4195 * @vsi: VSI being changed
4196 * @new_rx: new number of Rx queues
4197 * @new_tx: new number of Tx queues
4199 * Only change the number of queues if new_tx or new_rx is non-zero.
4201 * Returns 0 on success.
4203 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
4205 struct ice_pf *pf = vsi->back;
4206 int err = 0, timeout = 50;
4208 if (!new_rx && !new_tx)
4211 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
4212 timeout--;
4213 if (!timeout)
4214 return -EBUSY;
4215 usleep_range(1000, 2000);
4218 if (new_tx)
4219 vsi->req_txq = (u16)new_tx;
4220 if (new_rx)
4221 vsi->req_rxq = (u16)new_rx;
4223 /* set for the next time the netdev is started */
4224 if (!netif_running(vsi->netdev)) {
4225 ice_vsi_rebuild(vsi, false);
4226 dev_dbg(ice_pf_to_dev(pf), "Interface is down, queue count change happens when it is brought up\n");
4227 goto done;
4231 ice_vsi_rebuild(vsi, false);
4232 ice_pf_dcb_recfg(pf);
4235 clear_bit(ICE_CFG_BUSY, pf->state);
4240 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4241 * @pf: PF to configure
4243 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4244 * VSI can still Tx/Rx VLAN tagged packets.
4246 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4248 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4249 struct ice_vsi_ctx *ctxt;
4256 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4261 ctxt->info = vsi->info;
4263 ctxt->info.valid_sections =
4264 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4265 ICE_AQ_VSI_PROP_SECURITY_VALID |
4266 ICE_AQ_VSI_PROP_SW_VALID);
4268 /* disable VLAN anti-spoof */
4269 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4270 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4272 /* disable VLAN pruning and keep all other settings */
4273 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4275 /* allow all VLANs on Tx and don't strip on Rx */
4276 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4277 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4279 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4281 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4282 status, ice_aq_str(hw->adminq.sq_last_status));
4284 vsi->info.sec_flags = ctxt->info.sec_flags;
4285 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4286 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4293 * ice_log_pkg_init - log result of DDP package load
4294 * @hw: pointer to hardware info
4295 * @state: state of package load
4297 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4299 struct ice_pf *pf = hw->back;
4302 dev = ice_pf_to_dev(pf);
4305 case ICE_DDP_PKG_SUCCESS:
4306 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4307 hw->active_pkg_name,
4308 hw->active_pkg_ver.major,
4309 hw->active_pkg_ver.minor,
4310 hw->active_pkg_ver.update,
4311 hw->active_pkg_ver.draft);
4313 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4314 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4315 hw->active_pkg_name,
4316 hw->active_pkg_ver.major,
4317 hw->active_pkg_ver.minor,
4318 hw->active_pkg_ver.update,
4319 hw->active_pkg_ver.draft);
4321 case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4322 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
4323 hw->active_pkg_name,
4324 hw->active_pkg_ver.major,
4325 hw->active_pkg_ver.minor,
4326 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4328 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4329 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4330 hw->active_pkg_name,
4331 hw->active_pkg_ver.major,
4332 hw->active_pkg_ver.minor,
4333 hw->active_pkg_ver.update,
4334 hw->active_pkg_ver.draft,
hw->pkg_name,
hw->pkg_ver.major,
hw->pkg_ver.minor,
hw->pkg_ver.update,
hw->pkg_ver.draft);
break;
4341 case ICE_DDP_PKG_FW_MISMATCH:
4342 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
4344 case ICE_DDP_PKG_INVALID_FILE:
4345 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4347 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4348 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
4350 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4351 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
4352 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
break;
4354 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4355 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
4357 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4358 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
4360 case ICE_DDP_PKG_LOAD_ERROR:
4361 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
4362 /* poll for reset to complete */
4363 if (ice_check_reset(hw))
4364 dev_err(dev, "Error resetting device. Please reload the driver\n");
4366 case ICE_DDP_PKG_ERR:
4368 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
4374 * ice_load_pkg - load/reload the DDP Package file
4375 * @firmware: firmware structure when firmware requested or NULL for reload
4376 * @pf: pointer to the PF instance
4378 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4379 * initialize HW tables.
4382 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4384 enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4385 struct device *dev = ice_pf_to_dev(pf);
4386 struct ice_hw *hw = &pf->hw;
4388 /* Load DDP Package */
4389 if (firmware && !hw->pkg_copy) {
4390 state = ice_copy_and_init_pkg(hw, firmware->data,
4392 ice_log_pkg_init(hw, state);
4393 } else if (!firmware && hw->pkg_copy) {
4394 /* Reload package during rebuild after CORER/GLOBR reset */
4395 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4396 ice_log_pkg_init(hw, state);
4398 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4401 if (!ice_is_init_pkg_successful(state)) {
4403 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
return;
}
4407 /* A successful package download is the precondition for advanced
4408 * features, hence we set the ICE_FLAG_ADV_FEATURES flag
4410 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4414 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4415 * @pf: pointer to the PF structure
4417 * There is no error returned here because the driver should be able to handle
4418 * 128 Byte cache lines, so we only print a warning in case issues are seen,
4419 * specifically with Tx.
4421 static void ice_verify_cacheline_size(struct ice_pf *pf)
4423 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4424 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4425 ICE_CACHE_LINE_BYTES);
4429 * ice_send_version - update firmware with driver version
* @pf: pointer to the PF struct
*
4432 * Returns 0 on success, else error code
4434 static int ice_send_version(struct ice_pf *pf)
4436 struct ice_driver_ver dv;
4438 dv.major_ver = 0xff;
4439 dv.minor_ver = 0xff;
4440 dv.build_ver = 0xff;
4441 dv.subbuild_ver = 0;
4442 strscpy((char *)dv.driver_string, UTS_RELEASE,
4443 sizeof(dv.driver_string));
4444 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4448 * ice_init_fdir - Initialize flow director VSI and configuration
4449 * @pf: pointer to the PF instance
4451 * Returns 0 on success, negative on error
4453 static int ice_init_fdir(struct ice_pf *pf)
4455 struct device *dev = ice_pf_to_dev(pf);
4456 struct ice_vsi *ctrl_vsi;
4459 /* Side Band Flow Director needs to have a control VSI.
4460 * Allocate it and store it in the PF.
4462 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4464 dev_dbg(dev, "could not create control VSI\n");
4468 err = ice_vsi_open_ctrl(ctrl_vsi);
4470 dev_dbg(dev, "could not open control VSI\n");
4474 mutex_init(&pf->hw.fdir_fltr_lock);
4476 err = ice_fdir_create_dflt_rules(pf);
4483 ice_fdir_release_flows(&pf->hw);
4484 ice_vsi_close(ctrl_vsi);
4486 ice_vsi_release(ctrl_vsi);
4487 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4488 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4489 pf->ctrl_vsi_idx = ICE_NO_VSI;
4495 * ice_get_opt_fw_name - return optional firmware file name or NULL
4496 * @pf: pointer to the PF instance
4498 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4500 /* Optional firmware name same as default with additional dash
4501 * followed by an EUI-64 identifier (PCIe Device Serial Number)
4503 struct pci_dev *pdev = pf->pdev;
4504 char *opt_fw_filename;
4507 /* Determine the name of the optional file using the DSN (two
4508 * dwords following the start of the DSN Capability).
4510 dsn = pci_get_dsn(pdev);
4514 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4515 if (!opt_fw_filename)
4518 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4519 ICE_DDP_PKG_PATH, dsn);
4521 return opt_fw_filename;
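/* Example: for a hypothetical DSN of 0x0123456789abcdef, the snprintf() above
 * produces "intel/ice/ddp/ice-0123456789abcdef.pkg", which request_firmware()
 * then resolves against the kernel's firmware search paths.
 */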
4525 * ice_request_fw - Device initialization routine
4526 * @pf: pointer to the PF instance
4528 static void ice_request_fw(struct ice_pf *pf)
4530 char *opt_fw_filename = ice_get_opt_fw_name(pf);
4531 const struct firmware *firmware = NULL;
4532 struct device *dev = ice_pf_to_dev(pf);
4535 /* optional device-specific DDP (if present) overrides the default DDP
4536 * package file. The kernel logs a debug message if the file doesn't exist,
4537 * and warning messages for other errors.
4539 if (opt_fw_filename) {
4540 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
if (err) {
4542 kfree(opt_fw_filename);
goto dflt_pkg_load;
}
4546 /* request for firmware was successful. Download to device */
4547 ice_load_pkg(firmware, pf);
4548 kfree(opt_fw_filename);
4549 release_firmware(firmware);
return;
}

dflt_pkg_load:
4554 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4556 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4560 /* request for firmware was successful. Download to device */
4561 ice_load_pkg(firmware, pf);
4562 release_firmware(firmware);
4566 * ice_print_wake_reason - show the wake up cause in the log
4567 * @pf: pointer to the PF struct
4569 static void ice_print_wake_reason(struct ice_pf *pf)
4571 u32 wus = pf->wakeup_reason;
4572 const char *wake_str;
4574 /* if no wake event, nothing to print */
if (!wus)
return;
4578 if (wus & PFPM_WUS_LNKC_M)
4579 wake_str = "Link\n";
4580 else if (wus & PFPM_WUS_MAG_M)
4581 wake_str = "Magic Packet\n";
4582 else if (wus & PFPM_WUS_MNG_M)
4583 wake_str = "Management\n";
4584 else if (wus & PFPM_WUS_FW_RST_WK_M)
4585 wake_str = "Firmware Reset\n";
4587 wake_str = "Unknown\n";
4589 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
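/* Illustrative log output (PCI address hypothetical), e.g. after a magic
 * packet wake event:
 *
 *   ice 0000:3b:00.0: Wake reason: Magic Packet
 */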
4593 * ice_register_netdev - register netdev
4594 * @pf: pointer to the PF struct
4596 static int ice_register_netdev(struct ice_pf *pf)
4598 struct ice_vsi *vsi;
4601 vsi = ice_get_main_vsi(pf);
4602 if (!vsi || !vsi->netdev)
4605 err = register_netdev(vsi->netdev);
4607 goto err_register_netdev;
4609 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4610 netif_carrier_off(vsi->netdev);
4611 netif_tx_stop_all_queues(vsi->netdev);
4614 err_register_netdev:
4615 free_netdev(vsi->netdev);
4617 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4622 * ice_probe - Device initialization routine
4623 * @pdev: PCI device information struct
4624 * @ent: entry in ice_pci_tbl
4626 * Returns 0 on success, negative on failure
4629 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4631 struct device *dev = &pdev->dev;
4632 struct ice_vsi *vsi;
4637 if (pdev->is_virtfn) {
4638 dev_err(dev, "can't probe a virtual function\n");
4642 /* this driver uses devres, see
4643 * Documentation/driver-api/driver-model/devres.rst
4645 err = pcim_enable_device(pdev);
4649 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
4651 dev_err(dev, "BAR0 I/O map error %d\n", err);
4655 pf = ice_allocate_pf(dev);
4659 /* initialize Auxiliary index to invalid value */
4662 /* set up for high or low DMA */
4663 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4665 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4669 pci_enable_pcie_error_reporting(pdev);
4670 pci_set_master(pdev);
4673 pci_set_drvdata(pdev, pf);
4674 set_bit(ICE_DOWN, pf->state);
4675 /* Disable service task until DOWN bit is cleared */
4676 set_bit(ICE_SERVICE_DIS, pf->state);
4679 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4680 pci_save_state(pdev);
4683 hw->vendor_id = pdev->vendor;
4684 hw->device_id = pdev->device;
4685 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4686 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4687 hw->subsystem_device_id = pdev->subsystem_device;
4688 hw->bus.device = PCI_SLOT(pdev->devfn);
4689 hw->bus.func = PCI_FUNC(pdev->devfn);
4690 ice_set_ctrlq_len(hw);
4692 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4694 #ifndef CONFIG_DYNAMIC_DEBUG
4696 hw->debug_mask = debug;
4699 err = ice_init_hw(hw);
4701 dev_err(dev, "ice_init_hw failed: %d\n", err);
4703 goto err_exit_unroll;
4706 ice_init_feature_support(pf);
4710 /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
4711 * set in pf->flags, which will cause ice_is_safe_mode to return
4714 if (ice_is_safe_mode(pf)) {
4715 /* we already got function/device capabilities but these don't
4716 * reflect what the driver needs to do in safe mode. Instead of
4717 * adding conditional logic everywhere to ignore these
4718 * device/function capabilities, override them.
4720 ice_set_safe_mode_caps(hw);
4723 err = ice_init_pf(pf);
4725 dev_err(dev, "ice_init_pf failed: %d\n", err);
4726 goto err_init_pf_unroll;
4729 ice_devlink_init_regions(pf);
4731 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4732 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4733 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4734 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
i = 0;
4736 if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4737 pf->hw.udp_tunnel_nic.tables[i].n_entries =
4738 pf->hw.tnl.valid_count[TNL_VXLAN];
4739 pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4740 UDP_TUNNEL_TYPE_VXLAN;
i++;
}
4743 if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4744 pf->hw.udp_tunnel_nic.tables[i].n_entries =
4745 pf->hw.tnl.valid_count[TNL_GENEVE];
4746 pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4747 UDP_TUNNEL_TYPE_GENEVE;
i++;
}
4751 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4752 if (!pf->num_alloc_vsi) {
4754 goto err_init_pf_unroll;
4756 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4757 dev_warn(&pf->pdev->dev,
4758 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4759 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4760 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4763 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4767 goto err_init_pf_unroll;
4770 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
4771 sizeof(*pf->vsi_stats), GFP_KERNEL);
4772 if (!pf->vsi_stats) {
4774 goto err_init_vsi_unroll;
4777 err = ice_init_interrupt_scheme(pf);
4779 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4781 goto err_init_vsi_stats_unroll;
4784 /* In case of MSIX we are going to setup the misc vector right here
4785 * to handle admin queue events etc. In case of legacy and MSI
4786 * the misc functionality and queue processing is combined in
4787 * the same vector and that gets setup at open.
4789 err = ice_req_irq_msix_misc(pf);
4791 dev_err(dev, "setup of misc vector failed: %d\n", err);
4792 goto err_init_interrupt_unroll;
4795 /* create switch struct for the switch element created by FW on boot */
4796 pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4797 if (!pf->first_sw) {
4799 goto err_msix_misc_unroll;
4803 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4805 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4807 pf->first_sw->pf = pf;
4809 /* record the sw_id available for later use */
4810 pf->first_sw->sw_id = hw->port_info->sw_id;
4812 err = ice_setup_pf_sw(pf);
4814 dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4815 goto err_alloc_sw_unroll;
4818 clear_bit(ICE_SERVICE_DIS, pf->state);
4820 /* tell the firmware we are up */
4821 err = ice_send_version(pf);
4823 dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4825 goto err_send_version_unroll;
4828 /* since everything is good, start the service timer */
4829 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4831 err = ice_init_link_events(pf->hw.port_info);
4833 dev_err(dev, "ice_init_link_events failed: %d\n", err);
4834 goto err_send_version_unroll;
4837 /* not a fatal error if this fails */
4838 err = ice_init_nvm_phy_type(pf->hw.port_info);
4840 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4842 /* not a fatal error if this fails */
4843 err = ice_update_link_info(pf->hw.port_info);
4845 dev_err(dev, "ice_update_link_info failed: %d\n", err);
4847 ice_init_link_dflt_override(pf->hw.port_info);
4849 ice_check_link_cfg_err(pf,
4850 pf->hw.port_info->phy.link_info.link_cfg_err);
4852 /* if media available, initialize PHY settings */
4853 if (pf->hw.port_info->phy.link_info.link_info &
4854 ICE_AQ_MEDIA_AVAILABLE) {
4855 /* not a fatal error if this fails */
4856 err = ice_init_phy_user_cfg(pf->hw.port_info);
4858 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4860 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4861 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4864 ice_configure_phy(vsi);
4867 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4870 ice_verify_cacheline_size(pf);
4872 /* Save wakeup reason register for later use */
4873 pf->wakeup_reason = rd32(hw, PFPM_WUS);
4875 /* check for a power management event */
4876 ice_print_wake_reason(pf);
4878 /* clear wake status, all bits */
4879 wr32(hw, PFPM_WUS, U32_MAX);
4881 /* Disable WoL at init, wait for user to enable */
4882 device_set_wakeup_enable(dev, false);
4884 if (ice_is_safe_mode(pf)) {
4885 ice_set_safe_mode_vlan_cfg(pf);
4889 /* initialize DDP driven features */
4890 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_init(pf);
4893 if (ice_is_feature_supported(pf, ICE_F_GNSS))
ice_gnss_init(pf);
4896 /* Note: Flow director init failure is non-fatal to load */
4897 if (ice_init_fdir(pf))
4898 dev_err(dev, "could not initialize flow director\n");
4900 /* Note: DCB init failure is non-fatal to load */
4901 if (ice_init_pf_dcb(pf, false)) {
4902 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4903 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
} else {
4905 ice_cfg_lldp_mib_change(&pf->hw, true);
}
4908 if (ice_init_lag(pf))
4909 dev_warn(dev, "Failed to init link aggregation support\n");
4911 /* print PCI link speed and width */
4912 pcie_print_link_status(pf->pdev);
4915 err = ice_devlink_create_pf_port(pf);
4917 goto err_create_pf_port;
4919 vsi = ice_get_main_vsi(pf);
4920 if (!vsi || !vsi->netdev) {
4922 goto err_netdev_reg;
4925 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
4927 err = ice_register_netdev(pf);
4929 goto err_netdev_reg;
4931 err = ice_devlink_register_params(pf);
4933 goto err_netdev_reg;
4935 /* ready to go, so clear down state bit */
4936 clear_bit(ICE_DOWN, pf->state);
4937 if (ice_is_rdma_ena(pf)) {
4938 pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
4939 if (pf->aux_idx < 0) {
4940 dev_err(dev, "Failed to allocate device ID for AUX driver\n");
4942 goto err_devlink_reg_param;
4945 err = ice_init_rdma(pf);
4947 dev_err(dev, "Failed to initialize RDMA: %d\n", err);
4949 goto err_init_aux_unroll;
4952 dev_warn(dev, "RDMA is not supported on this device\n");
4955 ice_devlink_register(pf);
4958 err_init_aux_unroll:
4960 ida_free(&ice_aux_ida, pf->aux_idx);
4961 err_devlink_reg_param:
4962 ice_devlink_unregister_params(pf);
4964 ice_devlink_destroy_pf_port(pf);
4966 err_send_version_unroll:
4967 ice_vsi_release_all(pf);
4968 err_alloc_sw_unroll:
4969 set_bit(ICE_SERVICE_DIS, pf->state);
4970 set_bit(ICE_DOWN, pf->state);
4971 devm_kfree(dev, pf->first_sw);
4972 err_msix_misc_unroll:
4973 ice_free_irq_msix_misc(pf);
4974 err_init_interrupt_unroll:
4975 ice_clear_interrupt_scheme(pf);
4976 err_init_vsi_stats_unroll:
4977 devm_kfree(dev, pf->vsi_stats);
4978 pf->vsi_stats = NULL;
4979 err_init_vsi_unroll:
4980 devm_kfree(dev, pf->vsi);
4983 ice_devlink_destroy_regions(pf);
4986 pci_disable_pcie_error_reporting(pdev);
4987 pci_disable_device(pdev);
4992 * ice_set_wake - enable or disable Wake on LAN
4993 * @pf: pointer to the PF struct
4995 * Simple helper for WoL control
4997 static void ice_set_wake(struct ice_pf *pf)
4999 struct ice_hw *hw = &pf->hw;
5000 bool wol = pf->wol_ena;
5002 /* clear wake state, otherwise new wake events won't fire */
5003 wr32(hw, PFPM_WUS, U32_MAX);
5005 /* enable / disable APM wake up, no RMW needed */
5006 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5008 /* set magic packet filter enabled */
5009 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
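/* Illustrative note: pf->wol_ena is normally driven from userspace via the
 * ethtool WoL setting, e.g. with a hypothetical interface name:
 *
 *   ethtool -s eth0 wol g    # wake on magic packet
 *   ethtool -s eth0 wol d    # disable wake
 */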
5013 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5014 * @pf: pointer to the PF struct
5016 * Issue firmware command to enable multicast magic wake, making
5017 * sure that any locally administered address (LAA) is used for
5018 * wake, and that PF reset doesn't undo the LAA.
5020 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5022 struct device *dev = ice_pf_to_dev(pf);
5023 struct ice_hw *hw = &pf->hw;
5024 u8 mac_addr[ETH_ALEN];
5025 struct ice_vsi *vsi;
5032 vsi = ice_get_main_vsi(pf);
if (!vsi)
return;
5036 /* Get current MAC address in case it's an LAA */
if (vsi->netdev)
5038 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
else
5040 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5042 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5043 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5044 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5046 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5048 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5049 status, ice_aq_str(hw->adminq.sq_last_status));
5053 * ice_remove - Device removal routine
5054 * @pdev: PCI device information struct
5056 static void ice_remove(struct pci_dev *pdev)
5058 struct ice_pf *pf = pci_get_drvdata(pdev);
5061 ice_devlink_unregister(pf);
5062 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5063 if (!ice_is_reset_in_progress(pf->state))
5068 ice_tc_indir_block_remove(pf);
5070 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5071 set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
}
5075 ice_service_task_stop(pf);
5077 ice_aq_cancel_waiting_tasks(pf);
5078 ice_unplug_aux_dev(pf);
5079 if (pf->aux_idx >= 0)
5080 ida_free(&ice_aux_ida, pf->aux_idx);
5081 ice_devlink_unregister_params(pf);
5082 set_bit(ICE_DOWN, pf->state);
5085 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
5086 ice_ptp_release(pf);
5087 if (ice_is_feature_supported(pf, ICE_F_GNSS))
ice_gnss_exit(pf);
5089 if (!ice_is_safe_mode(pf))
5090 ice_remove_arfs(pf);
5091 ice_setup_mc_magic_wake(pf);
5092 ice_vsi_release_all(pf);
5093 mutex_destroy(&pf->hw.fdir_fltr_lock);
5094 ice_devlink_destroy_pf_port(pf);
5096 ice_free_irq_msix_misc(pf);
5097 ice_for_each_vsi(pf, i) {
5100 ice_vsi_free_q_vectors(pf->vsi[i]);
5102 devm_kfree(&pdev->dev, pf->vsi_stats);
5103 pf->vsi_stats = NULL;
5105 ice_devlink_destroy_regions(pf);
5106 ice_deinit_hw(&pf->hw);
5108 /* Issue a PFR as part of the prescribed driver unload flow. Do not
5109 * do it via ice_schedule_reset() since there is no need to rebuild
5110 * and the service task is already stopped.
5112 ice_reset(&pf->hw, ICE_RESET_PFR);
5113 pci_wait_for_pending_transaction(pdev);
5114 ice_clear_interrupt_scheme(pf);
5115 pci_disable_pcie_error_reporting(pdev);
5116 pci_disable_device(pdev);
5120 * ice_shutdown - PCI callback for shutting down device
5121 * @pdev: PCI device information struct
5123 static void ice_shutdown(struct pci_dev *pdev)
5125 struct ice_pf *pf = pci_get_drvdata(pdev);
5129 if (system_state == SYSTEM_POWER_OFF) {
5130 pci_wake_from_d3(pdev, pf->wol_ena);
5131 pci_set_power_state(pdev, PCI_D3hot);
5137 * ice_prepare_for_shutdown - prep for PCI shutdown
5138 * @pf: board private structure
5140 * Inform or close all dependent features in prep for PCI device shutdown
5142 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5144 struct ice_hw *hw = &pf->hw;
5147 /* Notify VFs of impending reset */
5148 if (ice_check_sq_alive(hw, &hw->mailboxq))
5149 ice_vc_notify_reset(pf);
5151 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5153 /* disable the VSIs and their queues that are not already DOWN */
5154 ice_pf_dis_all_vsi(pf, false);
5156 ice_for_each_vsi(pf, v)
if (pf->vsi[v])
5158 pf->vsi[v]->vsi_num = 0;
5160 ice_shutdown_all_ctrlq(hw);
5164 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5165 * @pf: board private structure to reinitialize
5167 * This routine reinitializes the interrupt scheme that was cleared during
5168 * the power management suspend callback.
5170 * This should be called during resume routine to re-allocate the q_vectors
5171 * and reacquire interrupts.
5173 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5175 struct device *dev = ice_pf_to_dev(pf);
5178 /* Since we clear MSIX flag during suspend, we need to
5179 * set it back during resume...
5182 ret = ice_init_interrupt_scheme(pf);
5184 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5188 /* Remap vectors and rings, after successful re-init interrupts */
5189 ice_for_each_vsi(pf, v) {
5193 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5196 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5199 ret = ice_req_irq_msix_misc(pf);
5201 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5211 ice_vsi_free_q_vectors(pf->vsi[v]);
5218 * @dev: generic device information structure
5220 * Power Management callback to quiesce the device and prepare
5221 * for D3 transition.
5223 static int __maybe_unused ice_suspend(struct device *dev)
5225 struct pci_dev *pdev = to_pci_dev(dev);
5229 pf = pci_get_drvdata(pdev);
5231 if (!ice_pf_state_is_nominal(pf)) {
5232 dev_err(dev, "Device is not ready, no need to suspend it\n");
5236 /* Stop watchdog tasks until resume completion.
5237 * Even though it is most likely that the service task is
5238 * disabled if the device is suspended or down, the service task's
5239 * state is controlled by a different state bit, and we should
5240 * store and honor whatever state that bit is in at this point.
5242 disabled = ice_service_task_stop(pf);
5244 ice_unplug_aux_dev(pf);
5246 /* Already suspended? Then there is nothing to do */
5247 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5249 ice_service_task_restart(pf);
5253 if (test_bit(ICE_DOWN, pf->state) ||
5254 ice_is_reset_in_progress(pf->state)) {
5255 dev_err(dev, "can't suspend device in reset or already down\n");
5257 ice_service_task_restart(pf);
5261 ice_setup_mc_magic_wake(pf);
5263 ice_prepare_for_shutdown(pf);
5267 /* Free vectors, clear the interrupt scheme and release IRQs
5268 * for proper hibernation, especially with large number of CPUs.
5269 * Otherwise hibernation might fail when mapping all the vectors back
5272 ice_free_irq_msix_misc(pf);
5273 ice_for_each_vsi(pf, v) {
5276 ice_vsi_free_q_vectors(pf->vsi[v]);
5278 ice_clear_interrupt_scheme(pf);
5280 pci_save_state(pdev);
5281 pci_wake_from_d3(pdev, pf->wol_ena);
5282 pci_set_power_state(pdev, PCI_D3hot);
5287 * ice_resume - PM callback for waking up from D3
5288 * @dev: generic device information structure
5290 static int __maybe_unused ice_resume(struct device *dev)
5292 struct pci_dev *pdev = to_pci_dev(dev);
5293 enum ice_reset_req reset_type;
5298 pci_set_power_state(pdev, PCI_D0);
5299 pci_restore_state(pdev);
5300 pci_save_state(pdev);
5302 if (!pci_device_is_present(pdev))
return -ENODEV;
5305 ret = pci_enable_device_mem(pdev);
5307 dev_err(dev, "Cannot enable device after suspend\n");
5311 pf = pci_get_drvdata(pdev);
5314 pf->wakeup_reason = rd32(hw, PFPM_WUS);
5315 ice_print_wake_reason(pf);
5317 /* We cleared the interrupt scheme when we suspended, so we need to
5318 * restore it now to resume device functionality.
5320 ret = ice_reinit_interrupt_scheme(pf);
5322 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5324 clear_bit(ICE_DOWN, pf->state);
5325 /* Now perform PF reset and rebuild */
5326 reset_type = ICE_RESET_PFR;
5327 /* re-enable service task for reset, but allow reset to schedule it */
5328 clear_bit(ICE_SERVICE_DIS, pf->state);
5330 if (ice_schedule_reset(pf, reset_type))
5331 dev_err(dev, "Reset during resume failed.\n");
5333 clear_bit(ICE_SUSPENDED, pf->state);
5334 ice_service_task_restart(pf);
5336 /* Restart the service task */
5337 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5341 #endif /* CONFIG_PM */
5344 * ice_pci_err_detected - warning that PCI error has been detected
5345 * @pdev: PCI device information struct
5346 * @err: the type of PCI error
5348 * Called to warn that something happened on the PCI bus and the error handling
5349 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
5351 static pci_ers_result_t
5352 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5354 struct ice_pf *pf = pci_get_drvdata(pdev);
5357 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5359 return PCI_ERS_RESULT_DISCONNECT;
5362 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5363 ice_service_task_stop(pf);
5365 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5366 set_bit(ICE_PFR_REQ, pf->state);
5367 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5371 return PCI_ERS_RESULT_NEED_RESET;
5375 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5376 * @pdev: PCI device information struct
5378 * Called to determine if the driver can recover from the PCI slot reset by
5379 * using a register read to determine if the device is recoverable.
5381 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5383 struct ice_pf *pf = pci_get_drvdata(pdev);
5384 pci_ers_result_t result;
5388 err = pci_enable_device_mem(pdev);
5390 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5392 result = PCI_ERS_RESULT_DISCONNECT;
} else {
5394 pci_set_master(pdev);
5395 pci_restore_state(pdev);
5396 pci_save_state(pdev);
5397 pci_wake_from_d3(pdev, false);
5399 /* Check for life */
5400 reg = rd32(&pf->hw, GLGEN_RTRIG);
if (!reg)
5402 result = PCI_ERS_RESULT_RECOVERED;
else
5404 result = PCI_ERS_RESULT_DISCONNECT;
5411 * ice_pci_err_resume - restart operations after PCI error recovery
5412 * @pdev: PCI device information struct
5414 * Called to allow the driver to bring things back up after PCI error and/or
5415 * reset recovery have finished
5417 static void ice_pci_err_resume(struct pci_dev *pdev)
5419 struct ice_pf *pf = pci_get_drvdata(pdev);
5422 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5427 if (test_bit(ICE_SUSPENDED, pf->state)) {
5428 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5433 ice_restore_all_vfs_msi_state(pdev);
5435 ice_do_reset(pf, ICE_RESET_PFR);
5436 ice_service_task_restart(pf);
5437 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5441 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5442 * @pdev: PCI device information struct
5444 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5446 struct ice_pf *pf = pci_get_drvdata(pdev);
5448 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5449 ice_service_task_stop(pf);
5451 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5452 set_bit(ICE_PFR_REQ, pf->state);
5453 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5459 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5460 * @pdev: PCI device information struct
5462 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5464 ice_pci_err_resume(pdev);
5467 /* ice_pci_tbl - PCI Device ID Table
5469 * Wildcard entries (PCI_ANY_ID) should come last
5470 * Last entry must be all 0s
5472 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5473 * Class, Class Mask, private data (not used) }
5475 static const struct pci_device_id ice_pci_tbl[] = {
5476 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5477 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5478 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5479 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
5480 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
5481 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5482 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5483 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5484 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5485 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5486 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5487 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5488 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5489 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5490 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5491 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5492 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5493 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5494 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5495 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5496 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5497 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5498 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5499 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5500 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5501 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
5502 /* required last entry */
5505 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5507 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5509 static const struct pci_error_handlers ice_pci_err_handler = {
5510 .error_detected = ice_pci_err_detected,
5511 .slot_reset = ice_pci_err_slot_reset,
5512 .reset_prepare = ice_pci_err_reset_prepare,
5513 .reset_done = ice_pci_err_reset_done,
5514 .resume = ice_pci_err_resume
5517 static struct pci_driver ice_driver = {
5518 .name = KBUILD_MODNAME,
5519 .id_table = ice_pci_tbl,
5521 .remove = ice_remove,
5523 .driver.pm = &ice_pm_ops,
5524 #endif /* CONFIG_PM */
5525 .shutdown = ice_shutdown,
5526 .sriov_configure = ice_sriov_configure,
5527 .err_handler = &ice_pci_err_handler
5531 * ice_module_init - Driver registration routine
5533 * ice_module_init is the first routine called when the driver is
5534 * loaded. All it does is register with the PCI subsystem.
5536 static int __init ice_module_init(void)
5540 pr_info("%s\n", ice_driver_string);
5541 pr_info("%s\n", ice_copyright);
5543 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
5545 pr_err("Failed to create workqueue\n");
5549 status = pci_register_driver(&ice_driver);
5551 pr_err("failed to register PCI driver, err %d\n", status);
5552 destroy_workqueue(ice_wq);
5557 module_init(ice_module_init);
5560 * ice_module_exit - Driver exit cleanup routine
5562 * ice_module_exit is called just before the driver is removed
5565 static void __exit ice_module_exit(void)
5567 pci_unregister_driver(&ice_driver);
5568 destroy_workqueue(ice_wq);
5569 pr_info("module unloaded\n");
5571 module_exit(ice_module_exit);
5574 * ice_set_mac_address - NDO callback to set MAC address
5575 * @netdev: network interface device structure
5576 * @pi: pointer to an address structure
5578 * Returns 0 on success, negative on failure
5580 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5582 struct ice_netdev_priv *np = netdev_priv(netdev);
5583 struct ice_vsi *vsi = np->vsi;
5584 struct ice_pf *pf = vsi->back;
5585 struct ice_hw *hw = &pf->hw;
5586 struct sockaddr *addr = pi;
5587 u8 old_mac[ETH_ALEN];
5592 mac = (u8 *)addr->sa_data;
5594 if (!is_valid_ether_addr(mac))
5595 return -EADDRNOTAVAIL;
5597 if (ether_addr_equal(netdev->dev_addr, mac)) {
5598 netdev_dbg(netdev, "already using mac %pM\n", mac);
5602 if (test_bit(ICE_DOWN, pf->state) ||
5603 ice_is_reset_in_progress(pf->state)) {
5604 netdev_err(netdev, "can't set mac %pM. device not ready\n",
5609 if (ice_chnl_dmac_fltr_cnt(pf)) {
5610 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5615 netif_addr_lock_bh(netdev);
5616 ether_addr_copy(old_mac, netdev->dev_addr);
5617 /* change the netdev's MAC address */
5618 eth_hw_addr_set(netdev, mac);
5619 netif_addr_unlock_bh(netdev);
5621 /* Clean up old MAC filter. Not an error if old filter doesn't exist */
5622 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5623 if (err && err != -ENOENT) {
5624 err = -EADDRNOTAVAIL;
5625 goto err_update_filters;
5628 /* Add filter for new MAC. If filter exists, return success */
5629 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5630 if (err == -EEXIST) {
5631 /* Although this MAC filter is already present in hardware it's
5632 * possible in some cases (e.g. bonding) that dev_addr was
5633 * modified outside of the driver and needs to be restored back
5636 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5640 /* error if the new filter addition failed */
5641 err = -EADDRNOTAVAIL;
5646 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5648 netif_addr_lock_bh(netdev);
5649 eth_hw_addr_set(netdev, old_mac);
5650 netif_addr_unlock_bh(netdev);
5654 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5657 /* write new MAC address to the firmware */
5658 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5659 err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5661 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
5668 * ice_set_rx_mode - NDO callback to set the netdev filters
5669 * @netdev: network interface device structure
5671 static void ice_set_rx_mode(struct net_device *netdev)
5673 struct ice_netdev_priv *np = netdev_priv(netdev);
5674 struct ice_vsi *vsi = np->vsi;
5679 /* Set the flags to synchronize filters
5680 * ndo_set_rx_mode may be triggered even without a change in netdev
* flags
5683 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5684 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5685 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5687 /* schedule our worker thread which will take care of
5688 * applying the new filter changes
5690 ice_service_task_schedule(vsi->back);
5694 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5695 * @netdev: network interface device structure
5696 * @queue_index: Queue ID
5697 * @maxrate: maximum bandwidth in Mbps
5700 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5702 struct ice_netdev_priv *np = netdev_priv(netdev);
5703 struct ice_vsi *vsi = np->vsi;
5708 /* Validate maxrate requested is within permitted range */
5709 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5710 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5711 maxrate, queue_index);
5715 q_handle = vsi->tx_rings[queue_index]->q_handle;
5716 tc = ice_dcb_get_tc(vsi, queue_index);
5718 /* Set BW back to default, when user set maxrate to 0 */
if (!maxrate)
5720 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5721 q_handle, ICE_MAX_BW);
else
5723 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5724 q_handle, ICE_MAX_BW, maxrate * 1000);
5726 netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
5733 * ice_fdb_add - add an entry to the hardware database
5734 * @ndm: the input from the stack
5735 * @tb: pointer to array of nladdr (unused)
5736 * @dev: the net device pointer
5737 * @addr: the MAC address entry being added
5739 * @flags: instructions from stack about fdb operation
5740 * @extack: netlink extended ack
5743 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5744 struct net_device *dev, const unsigned char *addr, u16 vid,
5745 u16 flags, struct netlink_ext_ack __always_unused *extack)
5750 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5753 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5754 netdev_err(dev, "FDB only supports static addresses\n");
5758 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5759 err = dev_uc_add_excl(dev, addr);
5760 else if (is_multicast_ether_addr(addr))
5761 err = dev_mc_add_excl(dev, addr);
5765 /* Only return duplicate errors if NLM_F_EXCL is set */
5766 if (err == -EEXIST && !(flags & NLM_F_EXCL))
5773 * ice_fdb_del - delete an entry from the hardware database
5774 * @ndm: the input from the stack
5775 * @tb: pointer to array of nladdr (unused)
5776 * @dev: the net device pointer
5777 * @addr: the MAC address entry being removed
5779 * @extack: netlink extended ack
5782 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5783 struct net_device *dev, const unsigned char *addr,
5784 __always_unused u16 vid, struct netlink_ext_ack *extack)
5788 if (ndm->ndm_state & NUD_PERMANENT) {
5789 netdev_err(dev, "FDB only supports static addresses\n");
5793 if (is_unicast_ether_addr(addr))
5794 err = dev_uc_del(dev, addr);
5795 else if (is_multicast_ether_addr(addr))
5796 err = dev_mc_del(dev, addr);
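/* Illustrative note: these FDB ops back the iproute2 "bridge fdb" command,
 * e.g. (interface name and address hypothetical):
 *
 *   bridge fdb add 01:00:5e:00:00:42 dev eth0 self permanent
 *   bridge fdb del 01:00:5e:00:00:42 dev eth0 self
 */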
5803 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5804 NETIF_F_HW_VLAN_CTAG_TX | \
5805 NETIF_F_HW_VLAN_STAG_RX | \
5806 NETIF_F_HW_VLAN_STAG_TX)
5808 #define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5809 NETIF_F_HW_VLAN_STAG_RX)
5811 #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
5812 NETIF_F_HW_VLAN_STAG_FILTER)
5815 * ice_fix_features - fix the netdev features flags based on device limitations
5816 * @netdev: ptr to the netdev that flags are being fixed on
5817 * @features: features that need to be checked and possibly fixed
5819 * Make sure any fixups are made to features in this callback. This enables the
5820 * driver to not have to check unsupported configurations throughout the driver
5821 * because that's the responsibility of this callback.
5823 * Single VLAN Mode (SVM) Supported Features:
5824 * NETIF_F_HW_VLAN_CTAG_FILTER
5825 * NETIF_F_HW_VLAN_CTAG_RX
5826 * NETIF_F_HW_VLAN_CTAG_TX
5828 * Double VLAN Mode (DVM) Supported Features:
5829 * NETIF_F_HW_VLAN_CTAG_FILTER
5830 * NETIF_F_HW_VLAN_CTAG_RX
5831 * NETIF_F_HW_VLAN_CTAG_TX
5833 * NETIF_F_HW_VLAN_STAG_FILTER
5834 * NETIF_F_HW_VLAN_STAG_RX
5835 * NETIF_F_HW_VLAN_STAG_TX
5837 * Features that need fixing:
5838 * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
5839 * These are mutually exclusive as the VSI context cannot support multiple
5840 * VLAN ethertypes simultaneously for stripping and/or insertion. If this
5841 * is not done, then default to clearing the requested STAG offload
5844 * All supported filtering has to be enabled or disabled together. For
5845 * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
5846 * together. If this is not done, then default to VLAN filtering disabled.
5847 * These are mutually exclusive as there is currently no way to
5848 * enable/disable VLAN filtering based on VLAN ethertype when using VLAN
5851 static netdev_features_t
5852 ice_fix_features(struct net_device *netdev, netdev_features_t features)
5854 struct ice_netdev_priv *np = netdev_priv(netdev);
5855 netdev_features_t req_vlan_fltr, cur_vlan_fltr;
5856 bool cur_ctag, cur_stag, req_ctag, req_stag;
5858 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
5859 cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5860 cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5862 req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
5863 req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5864 req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5866 if (req_vlan_fltr != cur_vlan_fltr) {
5867 if (ice_is_dvm_ena(&np->vsi->back->hw)) {
5868 if (req_ctag && req_stag) {
5869 features |= NETIF_VLAN_FILTERING_FEATURES;
5870 } else if (!req_ctag && !req_stag) {
5871 features &= ~NETIF_VLAN_FILTERING_FEATURES;
5872 } else if ((!cur_ctag && req_ctag && !cur_stag) ||
5873 (!cur_stag && req_stag && !cur_ctag)) {
5874 features |= NETIF_VLAN_FILTERING_FEATURES;
5875 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
5876 } else if ((cur_ctag && !req_ctag && cur_stag) ||
5877 (cur_stag && !req_stag && cur_ctag)) {
5878 features &= ~NETIF_VLAN_FILTERING_FEATURES;
5879 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
5882 if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
5883 netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
5885 if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
5886 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5890 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
5891 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
5892 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
5893 features &= ~(NETIF_F_HW_VLAN_STAG_RX |
5894 NETIF_F_HW_VLAN_STAG_TX);
5897 if (!(netdev->features & NETIF_F_RXFCS) &&
5898 (features & NETIF_F_RXFCS) &&
5899 (features & NETIF_VLAN_STRIPPING_FEATURES) &&
5900 !ice_vsi_has_non_zero_vlans(np->vsi)) {
5901 netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
5902 features &= ~NETIF_VLAN_STRIPPING_FEATURES;
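/* Worked example of the fixups above: in DVM, a request that turns on only
 * NETIF_F_HW_VLAN_CTAG_FILTER while STAG filtering is off gets promoted to
 * both filtering features (with a warning), since 802.1Q and 802.1ad
 * filtering can only be toggled together; the mirror case demotes both.
 */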
5909 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
5911 * @features: features used to determine VLAN offload settings
5913 * First, determine the vlan_ethertype based on the VLAN offload bits in
5914 * features. Then determine if stripping and insertion should be enabled or
5915 * disabled. Finally enable or disable VLAN stripping and insertion.
5918 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
5920 bool enable_stripping = true, enable_insertion = true;
5921 struct ice_vsi_vlan_ops *vlan_ops;
5922 int strip_err = 0, insert_err = 0;
5923 u16 vlan_ethertype = 0;
5925 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
5927 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
5928 vlan_ethertype = ETH_P_8021AD;
5929 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
5930 vlan_ethertype = ETH_P_8021Q;
5932 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
5933 enable_stripping = false;
5934 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
5935 enable_insertion = false;
5937 if (enable_stripping)
5938 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
5940 strip_err = vlan_ops->dis_stripping(vsi);
5942 if (enable_insertion)
5943 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
5945 insert_err = vlan_ops->dis_insertion(vsi);
5947 if (strip_err || insert_err)
5954 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
5956 * @features: features used to determine VLAN filtering settings
5958 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
5962 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
5964 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
5967 /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
5968 * if either bit is set
5971 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
5972 err = vlan_ops->ena_rx_filtering(vsi);
5974 err = vlan_ops->dis_rx_filtering(vsi);
5980 * ice_set_vlan_features - set VLAN settings based on suggested feature set
5981 * @netdev: ptr to the netdev being adjusted
5982 * @features: the feature set that the stack is suggesting
5984 * Only update VLAN settings if the requested_vlan_features are different than
5985 * the current_vlan_features.
5988 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
5990 netdev_features_t current_vlan_features, requested_vlan_features;
5991 struct ice_netdev_priv *np = netdev_priv(netdev);
5992 struct ice_vsi *vsi = np->vsi;
5995 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
5996 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
5997 if (current_vlan_features ^ requested_vlan_features) {
5998 if ((features & NETIF_F_RXFCS) &&
5999 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6000 dev_err(ice_pf_to_dev(vsi->back),
6001 "To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6005 err = ice_set_vlan_offload_features(vsi, features);
6010 current_vlan_features = netdev->features &
6011 NETIF_VLAN_FILTERING_FEATURES;
6012 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6013 if (current_vlan_features ^ requested_vlan_features) {
6014 err = ice_set_vlan_filtering_features(vsi, features);
6023 * ice_set_loopback - turn on/off loopback mode on underlying PF
6025 * @ena: flag to indicate the on/off setting
6027 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6029 bool if_running = netif_running(vsi->netdev);
6032 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6033 ret = ice_down(vsi);
6035 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6039 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6041 netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6049 * ice_set_features - set the netdev feature flags
6050 * @netdev: ptr to the netdev being adjusted
6051 * @features: the feature set that the stack is suggesting
6054 ice_set_features(struct net_device *netdev, netdev_features_t features)
6056 netdev_features_t changed = netdev->features ^ features;
6057 struct ice_netdev_priv *np = netdev_priv(netdev);
6058 struct ice_vsi *vsi = np->vsi;
6059 struct ice_pf *pf = vsi->back;
6062 /* Don't set any netdev advanced features with device in Safe Mode */
6063 if (ice_is_safe_mode(pf)) {
6064 dev_err(ice_pf_to_dev(pf),
6065 "Device is in Safe Mode - not enabling advanced netdev features\n");
6069 /* Do not change setting during reset */
6070 if (ice_is_reset_in_progress(pf->state)) {
6071 dev_err(ice_pf_to_dev(pf),
6072 "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6076 /* Multiple features can be changed in one call so keep features in
6077 * separate if/else statements to guarantee each feature is checked
6079 if (changed & NETIF_F_RXHASH)
6080 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6082 ret = ice_set_vlan_features(netdev, features);
6086 /* Turn on reception of the FCS (aka CRC); after setting this
6087 * flag the packet data will have the 4-byte CRC appended
6089 if (changed & NETIF_F_RXFCS) {
6090 if ((features & NETIF_F_RXFCS) &&
6091 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6092 dev_err(ice_pf_to_dev(vsi->back),
6093 "To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6097 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6098 ret = ice_down_up(vsi);
6103 if (changed & NETIF_F_NTUPLE) {
6104 bool ena = !!(features & NETIF_F_NTUPLE);
6106 ice_vsi_manage_fdir(vsi, ena);
6107 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6110 /* don't turn off hw_tc_offload when ADQ is already enabled */
6111 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6112 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6116 if (changed & NETIF_F_HW_TC) {
6117 bool ena = !!(features & NETIF_F_HW_TC);
6119 ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6120 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6123 if (changed & NETIF_F_LOOPBACK)
6124 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6130 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6131 * @vsi: VSI to setup VLAN properties for
6133 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6137 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6141 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6145 return ice_vsi_add_vlan_zero(vsi);
6149 * ice_vsi_cfg - Setup the VSI
6150 * @vsi: the VSI being configured
6152 * Return 0 on success and negative value on error
6154 int ice_vsi_cfg(struct ice_vsi *vsi)
6159 ice_set_rx_mode(vsi->netdev);
6161 if (vsi->type != ICE_VSI_LB) {
6162 err = ice_vsi_vlan_setup(vsi);
6168 ice_vsi_cfg_dcb_rings(vsi);
6170 err = ice_vsi_cfg_lan_txqs(vsi);
6171 if (!err && ice_is_xdp_ena_vsi(vsi))
6172 err = ice_vsi_cfg_xdp_txqs(vsi);
if (!err)
6174 err = ice_vsi_cfg_rxqs(vsi);
6179 /* THEORY OF MODERATION:
6180 * The ice driver hardware works differently than the hardware that DIMLIB was
6181 * originally made for. ice hardware doesn't have packet count limits that
6182 * can trigger an interrupt, but it *does* have interrupt rate limit support,
6183 * which is hard-coded to a limit of 250,000 ints/second.
6184 * If not using dynamic moderation, the INTRL value can be modified
6185 * by ethtool rx-usecs-high.
6188 /* the throttle rate for interrupts, basically worst case delay before
6189 * an initial interrupt fires, value is stored in microseconds.
6194 /* Make a different profile for Rx that doesn't allow quite so aggressive
6195 * moderation at the high end (it maxes out at 126us or about 8k interrupts a
6198 static const struct ice_dim rx_profile[] = {
6199 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6200 {8}, /* 125,000 ints/s */
6201 {16}, /* 62,500 ints/s */
6202 {62}, /* 16,129 ints/s */
6203 {126} /* 7,936 ints/s */
};
6206 /* The transmit profile, which has the same sorts of values
6207 * as the previous struct
6209 static const struct ice_dim tx_profile[] = {
6210 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6211 {8}, /* 125,000 ints/s */
6212 {40}, /* 25,000 ints/s */
6213 {128}, /* 7,812 ints/s */
6214 {256} /* 3,906 ints/s */
};
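/* Illustrative helper (a sketch, not used by the driver): profile entries are
 * ITR values in microseconds, so the approximate interrupt rate an entry
 * allows is one second divided by the ITR, e.g. 8 us -> 125,000 ints/s.
 */
static inline u32 ice_itr_to_ints_per_sec(u32 itr_us)
{
	return itr_us ? USEC_PER_SEC / itr_us : 0;
}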
6217 static void ice_tx_dim_work(struct work_struct *work)
6219 struct ice_ring_container *rc;
6223 dim = container_of(work, struct dim, work);
6224 rc = (struct ice_ring_container *)dim->priv;
6226 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6228 /* look up the values in our local table */
6229 itr = tx_profile[dim->profile_ix].itr;
6231 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6232 ice_write_itr(rc, itr);
6234 dim->state = DIM_START_MEASURE;
6237 static void ice_rx_dim_work(struct work_struct *work)
6239 struct ice_ring_container *rc;
6243 dim = container_of(work, struct dim, work);
6244 rc = (struct ice_ring_container *)dim->priv;
6246 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6248 /* look up the values in our local table */
6249 itr = rx_profile[dim->profile_ix].itr;
6251 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6252 ice_write_itr(rc, itr);
6254 dim->state = DIM_START_MEASURE;
6257 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6260 * ice_init_moderation - set up interrupt moderation
6261 * @q_vector: the vector containing rings to be configured
6263 * Set up interrupt moderation registers, with the intent to do the right thing
6264 * whether called from reset or from probe, and whether or not dynamic
6265 * moderation is enabled. Take care to write all the registers in both
6266 * dynamic and non-dynamic moderation modes so the hardware ends up in a known
6269 static void ice_init_moderation(struct ice_q_vector *q_vector)
6271 struct ice_ring_container *rc;
6272 bool tx_dynamic, rx_dynamic;
6275 INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6276 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6277 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6279 tx_dynamic = ITR_IS_DYNAMIC(rc);
6281 /* set the initial TX ITR to match the above */
6282 ice_write_itr(rc, tx_dynamic ?
6283 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6286 INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6287 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6288 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6290 rx_dynamic = ITR_IS_DYNAMIC(rc);
6292 /* set the initial RX ITR to match the above */
6293 ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6296 ice_set_q_vector_intrl(q_vector);
6300 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6301 * @vsi: the VSI being configured
6303 static void ice_napi_enable_all(struct ice_vsi *vsi)
6310 ice_for_each_q_vector(vsi, q_idx) {
6311 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6313 ice_init_moderation(q_vector);
6315 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6316 napi_enable(&q_vector->napi);
6321 * ice_up_complete - Finish the last steps of bringing up a connection
6322 * @vsi: The VSI being configured
6324 * Return 0 on success and negative value on error
6326 static int ice_up_complete(struct ice_vsi *vsi)
6328 struct ice_pf *pf = vsi->back;
6331 ice_vsi_cfg_msix(vsi);
6333 /* Enable only Rx rings, Tx rings were enabled by the FW when the
6334 * Tx queue group list was configured and the context bits were
6335 * programmed using ice_vsi_cfg_txqs
6337 err = ice_vsi_start_all_rx_rings(vsi);
6341 clear_bit(ICE_VSI_DOWN, vsi->state);
6342 ice_napi_enable_all(vsi);
6343 ice_vsi_ena_irq(vsi);
6345 if (vsi->port_info &&
6346 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
vsi->netdev) {
6348 ice_print_link_msg(vsi, true);
6349 netif_tx_start_all_queues(vsi->netdev);
6350 netif_carrier_on(vsi->netdev);
6351 ice_ptp_link_change(pf, pf->hw.pf_id, true);
6354 /* Perform an initial read of the statistics registers now to
6355 * set the baseline so counters are ready when interface is up
6357 ice_update_eth_stats(vsi);
6358 ice_service_task_schedule(pf);
6364 * ice_up - Bring the connection back up after being down
6365 * @vsi: VSI being configured
6367 int ice_up(struct ice_vsi *vsi)
6371 err = ice_vsi_cfg(vsi);
if (!err)
6373 err = ice_up_complete(vsi);
6379 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6380 * @syncp: pointer to u64_stats_sync
6381 * @stats: stats that pkts and bytes count will be taken from
6382 * @pkts: packets stats counter
6383 * @bytes: bytes stats counter
6385 * This function fetches stats from the ring, performing the atomic reads
6386 * that need to be done to get consistent u64 values on 32-bit machines.
6389 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6390 struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6395 start = u64_stats_fetch_begin(syncp);
*pkts = stats.pkts;
6397 *bytes = stats.bytes;
6398 } while (u64_stats_fetch_retry(syncp, start));
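/* Illustrative writer-side sketch (not driver code; field names taken from
 * the fetch path above): producers bump the counters inside a u64_stats
 * write section so that 32-bit readers retry instead of seeing torn values.
 */
static inline void ice_example_bump_ring_stats(struct ice_ring_stats *rs,
					       u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&rs->syncp);
	rs->stats.pkts += pkts;
	rs->stats.bytes += bytes;
	u64_stats_update_end(&rs->syncp);
}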
6402 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6403 * @vsi: the VSI to be updated
6404 * @vsi_stats: the stats struct to be updated
6405 * @rings: rings to work on
6406 * @count: number of rings
6409 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6410 struct rtnl_link_stats64 *vsi_stats,
6411 struct ice_tx_ring **rings, u16 count)
6415 for (i = 0; i < count; i++) {
6416 struct ice_tx_ring *ring;
6417 u64 pkts = 0, bytes = 0;
6419 ring = READ_ONCE(rings[i]);
6420 if (!ring || !ring->ring_stats)
6422 ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6423 ring->ring_stats->stats, &pkts,
6425 vsi_stats->tx_packets += pkts;
6426 vsi_stats->tx_bytes += bytes;
6427 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6428 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6429 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6434 * ice_update_vsi_ring_stats - Update VSI stats counters
6435 * @vsi: the VSI to be updated
6437 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6439 struct rtnl_link_stats64 *net_stats, *stats_prev;
6440 struct rtnl_link_stats64 *vsi_stats;
6444 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6448 /* reset non-netdev (extended) stats */
6449 vsi->tx_restart = 0;
6451 vsi->tx_linearize = 0;
6452 vsi->rx_buf_failed = 0;
6453 vsi->rx_page_failed = 0;
6457 /* update Tx rings counters */
6458 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6461 /* update Rx rings counters */
6462 ice_for_each_rxq(vsi, i) {
6463 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6464 struct ice_ring_stats *ring_stats;
6466 ring_stats = ring->ring_stats;
6467 ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6468 ring_stats->stats, &pkts,
6470 vsi_stats->rx_packets += pkts;
6471 vsi_stats->rx_bytes += bytes;
6472 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6473 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6476 /* update XDP Tx rings counters */
6477 if (ice_is_xdp_ena_vsi(vsi))
6478 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6483 net_stats = &vsi->net_stats;
6484 stats_prev = &vsi->net_stats_prev;
6486 /* clear prev counters after reset */
6487 if (vsi_stats->tx_packets < stats_prev->tx_packets ||
6488 vsi_stats->rx_packets < stats_prev->rx_packets) {
6489 stats_prev->tx_packets = 0;
6490 stats_prev->tx_bytes = 0;
6491 stats_prev->rx_packets = 0;
6492 stats_prev->rx_bytes = 0;
6495 /* update netdev counters */
6496 net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6497 net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6498 net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6499 net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6501 stats_prev->tx_packets = vsi_stats->tx_packets;
6502 stats_prev->tx_bytes = vsi_stats->tx_bytes;
6503 stats_prev->rx_packets = vsi_stats->rx_packets;
6504 stats_prev->rx_bytes = vsi_stats->rx_bytes;
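/* Worked example (illustrative, not driver code): the prev/cur snapshot logic
 * above keeps the netdev counters monotonic even though the ring counters
 * restart from zero after a VSI rebuild. With cur=1000 and prev=800 the
 * netdev counter grows by 200; if a reset then drops cur to 50 (< prev),
 * prev is zeroed first, so the counter grows by 50 instead of going backwards.
 */
static void example_accumulate(u64 *net, u64 *prev, u64 cur)
{
	if (cur < *prev)	/* ring counters restarted after a reset */
		*prev = 0;
	*net += cur - *prev;	/* add only the delta since the last pass */
	*prev = cur;
}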
6510 * ice_update_vsi_stats - Update VSI stats counters
6511 * @vsi: the VSI to be updated
6513 void ice_update_vsi_stats(struct ice_vsi *vsi)
6515 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6516 struct ice_eth_stats *cur_es = &vsi->eth_stats;
6517 struct ice_pf *pf = vsi->back;
6519 if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6520 test_bit(ICE_CFG_BUSY, pf->state))
6523 /* get stats as recorded by Tx/Rx rings */
6524 ice_update_vsi_ring_stats(vsi);
6526 /* get VSI stats as recorded by the hardware */
6527 ice_update_eth_stats(vsi);
6529 cur_ns->tx_errors = cur_es->tx_errors;
6530 cur_ns->rx_dropped = cur_es->rx_discards;
6531 cur_ns->tx_dropped = cur_es->tx_discards;
6532 cur_ns->multicast = cur_es->rx_multicast;
6534 /* update some more netdev stats if this is main VSI */
6535 if (vsi->type == ICE_VSI_PF) {
6536 cur_ns->rx_crc_errors = pf->stats.crc_errors;
6537 cur_ns->rx_errors = pf->stats.crc_errors +
6538 pf->stats.illegal_bytes +
6539 pf->stats.rx_len_errors +
6540 pf->stats.rx_undersize +
6541 pf->hw_csum_rx_error +
6542 pf->stats.rx_jabber +
6543 pf->stats.rx_fragments +
6544 pf->stats.rx_oversize;
6545 cur_ns->rx_length_errors = pf->stats.rx_len_errors;
6546 /* record drops from the port level */
6547 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6552 * ice_update_pf_stats - Update PF port stats counters
6553 * @pf: PF whose stats need to be updated
6555 void ice_update_pf_stats(struct ice_pf *pf)
6557 struct ice_hw_port_stats *prev_ps, *cur_ps;
6558 struct ice_hw *hw = &pf->hw;
6562 port = hw->port_info->lport;
6563 prev_ps = &pf->stats_prev;
6564 cur_ps = &pf->stats;
6566 if (ice_is_reset_in_progress(pf->state))
6567 pf->stat_prev_loaded = false;
6569 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6570 &prev_ps->eth.rx_bytes,
6571 &cur_ps->eth.rx_bytes);
6573 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6574 &prev_ps->eth.rx_unicast,
6575 &cur_ps->eth.rx_unicast);
6577 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6578 &prev_ps->eth.rx_multicast,
6579 &cur_ps->eth.rx_multicast);
6581 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6582 &prev_ps->eth.rx_broadcast,
6583 &cur_ps->eth.rx_broadcast);
6585 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6586 &prev_ps->eth.rx_discards,
6587 &cur_ps->eth.rx_discards);
6589 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6590 &prev_ps->eth.tx_bytes,
6591 &cur_ps->eth.tx_bytes);
6593 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6594 &prev_ps->eth.tx_unicast,
6595 &cur_ps->eth.tx_unicast);
6597 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6598 &prev_ps->eth.tx_multicast,
6599 &cur_ps->eth.tx_multicast);
6601 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6602 &prev_ps->eth.tx_broadcast,
6603 &cur_ps->eth.tx_broadcast);
6605 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
6606 &prev_ps->tx_dropped_link_down,
6607 &cur_ps->tx_dropped_link_down);
6609 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
6610 &prev_ps->rx_size_64, &cur_ps->rx_size_64);
6612 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
6613 &prev_ps->rx_size_127, &cur_ps->rx_size_127);
6615 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
6616 &prev_ps->rx_size_255, &cur_ps->rx_size_255);
6618 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
6619 &prev_ps->rx_size_511, &cur_ps->rx_size_511);
6621 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
6622 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
6624 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
6625 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
6627 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
6628 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
6630 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
6631 &prev_ps->tx_size_64, &cur_ps->tx_size_64);
6633 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
6634 &prev_ps->tx_size_127, &cur_ps->tx_size_127);
6636 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
6637 &prev_ps->tx_size_255, &cur_ps->tx_size_255);
6639 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
6640 &prev_ps->tx_size_511, &cur_ps->tx_size_511);
6642 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
6643 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
6645 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
6646 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
6648 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
6649 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
6651 fd_ctr_base = hw->fd_ctr_base;
6653 ice_stat_update40(hw,
6654 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
6655 pf->stat_prev_loaded, &prev_ps->fd_sb_match,
6656 &cur_ps->fd_sb_match);
6657 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
6658 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
6660 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
6661 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
6663 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
6664 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
6666 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
6667 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
6669 ice_update_dcb_stats(pf);
6671 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
6672 &prev_ps->crc_errors, &cur_ps->crc_errors);
6674 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
6675 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
6677 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
6678 &prev_ps->mac_local_faults,
6679 &cur_ps->mac_local_faults);
6681 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
6682 &prev_ps->mac_remote_faults,
6683 &cur_ps->mac_remote_faults);
6685 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
6686 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
6688 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
6689 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
6691 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
6692 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
6694 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
6695 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
6697 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
6698 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
6700 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6702 pf->stat_prev_loaded = true;
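/* Sketch (an assumption about the semantics of the ice_stat_update40() calls
 * above, not a copy of the helper): the port counters are 40-bit registers
 * that wrap, so each pass accumulates the delta modulo 2^40 and keeps the raw
 * reading as the next baseline; the first pass after reset only seeds prev.
 */
static void example_stat_update40(u64 reading, bool prev_loaded,
				  u64 *prev, u64 *accum)
{
	reading &= BIT_ULL(40) - 1;		/* registers are 40 bits wide */
	if (!prev_loaded)
		*prev = reading;		/* seed the baseline, no delta */
	if (reading >= *prev)
		*accum += reading - *prev;
	else					/* counter wrapped past 2^40 */
		*accum += (reading + BIT_ULL(40)) - *prev;
	*prev = reading;
}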
6706 * ice_get_stats64 - get statistics for network device structure
6707 * @netdev: network interface device structure
6708 * @stats: main device statistics structure
6711 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6713 struct ice_netdev_priv *np = netdev_priv(netdev);
6714 struct rtnl_link_stats64 *vsi_stats;
6715 struct ice_vsi *vsi = np->vsi;
6717 vsi_stats = &vsi->net_stats;
6719 if (!vsi->num_txq || !vsi->num_rxq)
6722 /* netdev packet/byte stats come from the ring counters. These are obtained
6723 * by summing up the ring counters (done by ice_update_vsi_ring_stats).
6724 * But only call the update routine and read the registers if the VSI is not down.
6727 if (!test_bit(ICE_VSI_DOWN, vsi->state))
6728 ice_update_vsi_ring_stats(vsi);
6729 stats->tx_packets = vsi_stats->tx_packets;
6730 stats->tx_bytes = vsi_stats->tx_bytes;
6731 stats->rx_packets = vsi_stats->rx_packets;
6732 stats->rx_bytes = vsi_stats->rx_bytes;
6734 /* The rest of the stats can be read from the hardware but instead we
6735 * just return values that the watchdog task has already obtained from the hardware.
6738 stats->multicast = vsi_stats->multicast;
6739 stats->tx_errors = vsi_stats->tx_errors;
6740 stats->tx_dropped = vsi_stats->tx_dropped;
6741 stats->rx_errors = vsi_stats->rx_errors;
6742 stats->rx_dropped = vsi_stats->rx_dropped;
6743 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6744 stats->rx_length_errors = vsi_stats->rx_length_errors;
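/* Note (illustrative): ndo_get_stats64 is expected to be cheap and can be
 * called frequently, so this handler avoids fresh register or admin-queue
 * access entirely; the ring counters are plain memory reads and everything
 * else comes from the values the watchdog/service task cached last.
 */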
6748 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6749 * @vsi: VSI having NAPI disabled
6751 static void ice_napi_disable_all(struct ice_vsi *vsi)
6758 ice_for_each_q_vector(vsi, q_idx) {
6759 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6761 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6762 napi_disable(&q_vector->napi);
6764 cancel_work_sync(&q_vector->tx.dim.work);
6765 cancel_work_sync(&q_vector->rx.dim.work);
6770 * ice_down - Shutdown the connection
6771 * @vsi: The VSI being stopped
6773 * Caller of this function is expected to set the vsi->state ICE_VSI_DOWN bit
6775 int ice_down(struct ice_vsi *vsi)
6777 int i, tx_err, rx_err, vlan_err = 0;
6779 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
6781 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6782 vlan_err = ice_vsi_del_vlan_zero(vsi);
6783 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
6784 netif_carrier_off(vsi->netdev);
6785 netif_tx_disable(vsi->netdev);
6786 } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
6787 ice_eswitch_stop_all_tx_queues(vsi->back);
6790 ice_vsi_dis_irq(vsi);
6792 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6794 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
6795 vsi->vsi_num, tx_err);
6796 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6797 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6799 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
6800 vsi->vsi_num, tx_err);
6803 rx_err = ice_vsi_stop_all_rx_rings(vsi);
6805 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
6806 vsi->vsi_num, rx_err);
6808 ice_napi_disable_all(vsi);
6810 ice_for_each_txq(vsi, i)
6811 ice_clean_tx_ring(vsi->tx_rings[i]);
6813 ice_for_each_rxq(vsi, i)
6814 ice_clean_rx_ring(vsi->rx_rings[i]);
6816 if (tx_err || rx_err || vlan_err) {
6817 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6818 vsi->vsi_num, vsi->vsw->sw_id);
6826 * ice_down_up - shutdown the VSI connection and bring it up
6827 * @vsi: the VSI to be reconnected
6829 int ice_down_up(struct ice_vsi *vsi)
6833 /* if DOWN already set, nothing to do */
6834 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
6837 ret = ice_down(vsi);
6843 netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
6851 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6852 * @vsi: VSI having resources allocated
6854 * Return 0 on success, negative on failure
6856 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6860 if (!vsi->num_txq) {
6861 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6866 ice_for_each_txq(vsi, i) {
6867 struct ice_tx_ring *ring = vsi->tx_rings[i];
6873 ring->netdev = vsi->netdev;
6874 err = ice_setup_tx_ring(ring);
6883 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6884 * @vsi: VSI having resources allocated
6886 * Return 0 on success, negative on failure
6888 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
6892 if (!vsi->num_rxq) {
6893 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
6898 ice_for_each_rxq(vsi, i) {
6899 struct ice_rx_ring *ring = vsi->rx_rings[i];
6905 ring->netdev = vsi->netdev;
6906 err = ice_setup_rx_ring(ring);
6915 * ice_vsi_open_ctrl - open control VSI for use
6916 * @vsi: the VSI to open
6918 * Initialization of the Control VSI
6920 * Returns 0 on success, negative value on error
6922 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
6924 char int_name[ICE_INT_NAME_STR_LEN];
6925 struct ice_pf *pf = vsi->back;
6929 dev = ice_pf_to_dev(pf);
6930 /* allocate descriptors */
6931 err = ice_vsi_setup_tx_rings(vsi);
6935 err = ice_vsi_setup_rx_rings(vsi);
6939 err = ice_vsi_cfg(vsi);
6943 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
6944 dev_driver_string(dev), dev_name(dev));
6945 err = ice_vsi_req_irq_msix(vsi, int_name);
6949 ice_vsi_cfg_msix(vsi);
6951 err = ice_vsi_start_all_rx_rings(vsi);
6953 goto err_up_complete;
6955 clear_bit(ICE_VSI_DOWN, vsi->state);
6956 ice_vsi_ena_irq(vsi);
6963 ice_vsi_free_rx_rings(vsi);
6965 ice_vsi_free_tx_rings(vsi);
6971 * ice_vsi_open - Called when a network interface is made active
6972 * @vsi: the VSI to open
6974 * Initialization of the VSI
6976 * Returns 0 on success, negative value on error
6978 int ice_vsi_open(struct ice_vsi *vsi)
6980 char int_name[ICE_INT_NAME_STR_LEN];
6981 struct ice_pf *pf = vsi->back;
6984 /* allocate descriptors */
6985 err = ice_vsi_setup_tx_rings(vsi);
6989 err = ice_vsi_setup_rx_rings(vsi);
6993 err = ice_vsi_cfg(vsi);
6997 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
6998 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
6999 err = ice_vsi_req_irq_msix(vsi, int_name);
7003 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7005 if (vsi->type == ICE_VSI_PF) {
7006 /* Notify the stack of the actual queue counts. */
7007 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7011 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7016 err = ice_up_complete(vsi);
7018 goto err_up_complete;
7025 ice_vsi_free_irq(vsi);
7027 ice_vsi_free_rx_rings(vsi);
7029 ice_vsi_free_tx_rings(vsi);
7035 * ice_vsi_release_all - Delete all VSIs
7036 * @pf: PF from which all VSIs are being removed
7038 static void ice_vsi_release_all(struct ice_pf *pf)
7045 ice_for_each_vsi(pf, i) {
7049 if (pf->vsi[i]->type == ICE_VSI_CHNL)
7052 err = ice_vsi_release(pf->vsi[i]);
7054 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7055 i, err, pf->vsi[i]->vsi_num);
7060 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7061 * @pf: pointer to the PF instance
7062 * @type: VSI type to rebuild
7064 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7066 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7068 struct device *dev = ice_pf_to_dev(pf);
7071 ice_for_each_vsi(pf, i) {
7072 struct ice_vsi *vsi = pf->vsi[i];
7074 if (!vsi || vsi->type != type)
7077 /* rebuild the VSI */
7078 err = ice_vsi_rebuild(vsi, true);
7080 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7081 err, vsi->idx, ice_vsi_type_str(type));
7085 /* replay filters for the VSI */
7086 err = ice_replay_vsi(&pf->hw, vsi->idx);
7088 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7089 err, vsi->idx, ice_vsi_type_str(type));
7093 /* Re-map HW VSI number, using VSI handle that has been
7094 * previously validated in ice_replay_vsi() call above
7096 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7098 /* enable the VSI */
7099 err = ice_ena_vsi(vsi, false);
7101 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7102 err, vsi->idx, ice_vsi_type_str(type));
7106 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7107 ice_vsi_type_str(type));
7114 * ice_update_pf_netdev_link - Update PF netdev link status
7115 * @pf: pointer to the PF instance
7117 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7122 ice_for_each_vsi(pf, i) {
7123 struct ice_vsi *vsi = pf->vsi[i];
7125 if (!vsi || vsi->type != ICE_VSI_PF)
7128 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7130 netif_carrier_on(pf->vsi[i]->netdev);
7131 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7133 netif_carrier_off(pf->vsi[i]->netdev);
7134 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7140 * ice_rebuild - rebuild after reset
7141 * @pf: PF to rebuild
7142 * @reset_type: type of reset
7144 * Do not rebuild VF VSI in this flow because that is already handled via
7145 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
7146 * PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we don't want
7147 * to reset/rebuild all the VF VSIs twice.
7149 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7151 struct device *dev = ice_pf_to_dev(pf);
7152 struct ice_hw *hw = &pf->hw;
7156 if (test_bit(ICE_DOWN, pf->state))
7157 goto clear_recovery;
7159 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7161 #define ICE_EMP_RESET_SLEEP_MS 5000
7162 if (reset_type == ICE_RESET_EMPR) {
7163 /* If an EMP reset has occurred, any previously pending flash
7164 * update will have completed. We no longer know whether or
7165 * not the NVM update EMP reset is restricted.
7167 pf->fw_emp_reset_disabled = false;
7169 msleep(ICE_EMP_RESET_SLEEP_MS);
7172 err = ice_init_all_ctrlq(hw);
7174 dev_err(dev, "control queues init failed %d\n", err);
7175 goto err_init_ctrlq;
7178 /* if DDP was previously loaded successfully */
7179 if (!ice_is_safe_mode(pf)) {
7180 /* reload the SW DB of filter tables */
7181 if (reset_type == ICE_RESET_PFR)
7182 ice_fill_blk_tbls(hw);
7184 /* Reload DDP Package after CORER/GLOBR reset */
7185 ice_load_pkg(NULL, pf);
7188 err = ice_clear_pf_cfg(hw);
7190 dev_err(dev, "clear PF configuration failed %d\n", err);
7191 goto err_init_ctrlq;
7194 ice_clear_pxe_mode(hw);
7196 err = ice_init_nvm(hw);
7198 dev_err(dev, "ice_init_nvm failed %d\n", err);
7199 goto err_init_ctrlq;
7202 err = ice_get_caps(hw);
7204 dev_err(dev, "ice_get_caps failed %d\n", err);
7205 goto err_init_ctrlq;
7208 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7210 dev_err(dev, "set_mac_cfg failed %d\n", err);
7211 goto err_init_ctrlq;
7214 dvm = ice_is_dvm_ena(hw);
7216 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7218 goto err_init_ctrlq;
7220 err = ice_sched_init_port(hw->port_info);
7222 goto err_sched_init_port;
7224 /* start misc vector */
7225 err = ice_req_irq_msix_misc(pf);
7227 dev_err(dev, "misc vector setup failed: %d\n", err);
7228 goto err_sched_init_port;
7231 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7232 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7233 if (!rd32(hw, PFQF_FD_SIZE)) {
7234 u16 unused, guar, b_effort;
7236 guar = hw->func_caps.fd_fltr_guar;
7237 b_effort = hw->func_caps.fd_fltr_best_effort;
7239 /* force guaranteed filter pool for PF */
7240 ice_alloc_fd_guar_item(hw, &unused, guar);
7241 /* force shared filter pool for PF */
7242 ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7246 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7247 ice_dcb_rebuild(pf);
7249 /* If the PF previously had enabled PTP, PTP init needs to happen before
7250 * the VSI rebuild. If not, this causes the PTP link status events to fail.
7253 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7256 if (ice_is_feature_supported(pf, ICE_F_GNSS))
7259 /* rebuild PF VSI */
7260 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7262 dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7263 goto err_vsi_rebuild;
7266 /* configure PTP timestamping after VSI rebuild */
7267 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7268 ice_ptp_cfg_timestamp(pf, false);
7270 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
7272 dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
7273 goto err_vsi_rebuild;
7276 if (reset_type == ICE_RESET_PFR) {
7277 err = ice_rebuild_channels(pf);
7279 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7281 goto err_vsi_rebuild;
7285 /* If Flow Director is active */
7286 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7287 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7289 dev_err(dev, "control VSI rebuild failed: %d\n", err);
7290 goto err_vsi_rebuild;
7293 /* replay HW Flow Director recipes */
7295 ice_fdir_replay_flows(hw);
7297 /* replay Flow Director filters */
7298 ice_fdir_replay_fltrs(pf);
7300 ice_rebuild_arfs(pf);
7303 ice_update_pf_netdev_link(pf);
7305 /* tell the firmware we are up */
7306 err = ice_send_version(pf);
7308 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7310 goto err_vsi_rebuild;
7313 ice_replay_post(hw);
7315 /* if we get here, reset flow is successful */
7316 clear_bit(ICE_RESET_FAILED, pf->state);
7318 ice_plug_aux_dev(pf);
7322 err_sched_init_port:
7323 ice_sched_cleanup_all(hw);
7325 ice_shutdown_all_ctrlq(hw);
7326 set_bit(ICE_RESET_FAILED, pf->state);
7328 /* set this bit in PF state to control service task scheduling */
7329 set_bit(ICE_NEEDS_RESTART, pf->state);
7330 dev_err(dev, "Rebuild failed, unload and reload driver\n");
7334 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
7335 * @vsi: Pointer to VSI structure
7337 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
7339 if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
7340 return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
7342 return ICE_RXBUF_3072;
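/* Illustrative helper (not driver code): the largest MTU XDP can accept is
 * the frame size above minus the L2 header/padding overhead, which is the
 * exact bound ice_change_mtu() enforces below.
 */
static int example_max_xdp_mtu(struct ice_vsi *vsi)
{
	return ice_max_xdp_frame_size(vsi) - ICE_ETH_PKT_HDR_PAD;
}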
7346 * ice_change_mtu - NDO callback to change the MTU
7347 * @netdev: network interface device structure
7348 * @new_mtu: new value for maximum frame size
7350 * Returns 0 on success, negative on failure
7352 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
7354 struct ice_netdev_priv *np = netdev_priv(netdev);
7355 struct ice_vsi *vsi = np->vsi;
7356 struct ice_pf *pf = vsi->back;
7360 if (new_mtu == (int)netdev->mtu) {
7361 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7365 if (ice_is_xdp_ena_vsi(vsi)) {
7366 int frame_size = ice_max_xdp_frame_size(vsi);
7368 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7369 netdev_err(netdev, "max MTU for XDP usage is %d\n",
7370 frame_size - ICE_ETH_PKT_HDR_PAD);
7375 /* if a reset is in progress, wait for some time for it to complete */
7377 if (ice_is_reset_in_progress(pf->state)) {
7379 usleep_range(1000, 2000);
7384 } while (count < 100);
7387 netdev_err(netdev, "can't change MTU. Device is busy\n");
7391 netdev->mtu = (unsigned int)new_mtu;
7393 /* if VSI is up, bring it down and then back up */
7394 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
7395 err = ice_down(vsi);
7397 netdev_err(netdev, "change MTU if_down err %d\n", err);
7403 netdev_err(netdev, "change MTU if_up err %d\n", err);
7408 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7409 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7415 * ice_eth_ioctl - Access the hwtstamp interface
7416 * @netdev: network interface device structure
7417 * @ifr: interface request data
7418 * @cmd: ioctl command
7420 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7422 struct ice_netdev_priv *np = netdev_priv(netdev);
7423 struct ice_pf *pf = np->vsi->back;
7427 return ice_ptp_get_ts_config(pf, ifr);
7429 return ice_ptp_set_ts_config(pf, ifr);
7436 * ice_aq_str - convert AQ err code to a string
7437 * @aq_err: the AQ error code to convert
7439 const char *ice_aq_str(enum ice_aq_err aq_err)
7444 case ICE_AQ_RC_EPERM:
7445 return "ICE_AQ_RC_EPERM";
7446 case ICE_AQ_RC_ENOENT:
7447 return "ICE_AQ_RC_ENOENT";
7448 case ICE_AQ_RC_ENOMEM:
7449 return "ICE_AQ_RC_ENOMEM";
7450 case ICE_AQ_RC_EBUSY:
7451 return "ICE_AQ_RC_EBUSY";
7452 case ICE_AQ_RC_EEXIST:
7453 return "ICE_AQ_RC_EEXIST";
7454 case ICE_AQ_RC_EINVAL:
7455 return "ICE_AQ_RC_EINVAL";
7456 case ICE_AQ_RC_ENOSPC:
7457 return "ICE_AQ_RC_ENOSPC";
7458 case ICE_AQ_RC_ENOSYS:
7459 return "ICE_AQ_RC_ENOSYS";
7460 case ICE_AQ_RC_EMODE:
7461 return "ICE_AQ_RC_EMODE";
7462 case ICE_AQ_RC_ENOSEC:
7463 return "ICE_AQ_RC_ENOSEC";
7464 case ICE_AQ_RC_EBADSIG:
7465 return "ICE_AQ_RC_EBADSIG";
7466 case ICE_AQ_RC_ESVN:
7467 return "ICE_AQ_RC_ESVN";
7468 case ICE_AQ_RC_EBADMAN:
7469 return "ICE_AQ_RC_EBADMAN";
7470 case ICE_AQ_RC_EBADBUF:
7471 return "ICE_AQ_RC_EBADBUF";
7474 return "ICE_AQ_RC_UNKNOWN";
7478 * ice_set_rss_lut - Set RSS LUT
7479 * @vsi: Pointer to VSI structure
7480 * @lut: Lookup table
7481 * @lut_size: Lookup table size
7483 * Returns 0 on success, negative on failure
7485 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7487 struct ice_aq_get_set_rss_lut_params params = {};
7488 struct ice_hw *hw = &vsi->back->hw;
7494 params.vsi_handle = vsi->idx;
7495 params.lut_size = lut_size;
7496 params.lut_type = vsi->rss_lut_type;
7499 status = ice_aq_set_rss_lut(hw, ¶ms);
7501 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7502 status, ice_aq_str(hw->adminq.sq_last_status));
7508 * ice_set_rss_key - Set RSS key
7509 * @vsi: Pointer to the VSI structure
7510 * @seed: RSS hash seed
7512 * Returns 0 on success, negative on failure
7514 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7516 struct ice_hw *hw = &vsi->back->hw;
7522 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7524 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7525 status, ice_aq_str(hw->adminq.sq_last_status));
7531 * ice_get_rss_lut - Get RSS LUT
7532 * @vsi: Pointer to VSI structure
7533 * @lut: Buffer to store the lookup table entries
7534 * @lut_size: Size of buffer to store the lookup table entries
7536 * Returns 0 on success, negative on failure
7538 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7540 struct ice_aq_get_set_rss_lut_params params = {};
7541 struct ice_hw *hw = &vsi->back->hw;
7547 params.vsi_handle = vsi->idx;
7548 params.lut_size = lut_size;
7549 params.lut_type = vsi->rss_lut_type;
7552 status = ice_aq_get_rss_lut(hw, ¶ms);
7554 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7555 status, ice_aq_str(hw->adminq.sq_last_status));
7561 * ice_get_rss_key - Get RSS key
7562 * @vsi: Pointer to VSI structure
7563 * @seed: Buffer to store the key in
7565 * Returns 0 on success, negative on failure
7567 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7569 struct ice_hw *hw = &vsi->back->hw;
7575 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7577 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7578 status, ice_aq_str(hw->adminq.sq_last_status));
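/* Sketch (illustrative, not driver code): a typical caller of
 * ice_set_rss_lut() spreads hash buckets round-robin across the active Rx
 * queues, which is what "ethtool -X <dev> equal N" effectively requests:
 */
static void example_fill_rss_lut(u8 *lut, u16 lut_size, u16 num_rx_queues)
{
	u16 i;

	for (i = 0; i < lut_size; i++)
		lut[i] = i % num_rx_queues;	/* Rx queue for hash bucket i */
}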
7584 * ice_bridge_getlink - Get the hardware bridge mode
7587 * @seq: RTNL message seq
7588 * @dev: the netdev being configured
7589 * @filter_mask: filter mask passed in
7590 * @nlflags: netlink flags passed in
7592 * Return the bridge mode (VEB/VEPA)
7595 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7596 struct net_device *dev, u32 filter_mask, int nlflags)
7598 struct ice_netdev_priv *np = netdev_priv(dev);
7599 struct ice_vsi *vsi = np->vsi;
7600 struct ice_pf *pf = vsi->back;
7603 bmode = pf->first_sw->bridge_mode;
7605 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7610 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7611 * @vsi: Pointer to VSI structure
7612 * @bmode: Hardware bridge mode (VEB/VEPA)
7614 * Returns 0 on success, negative on failure
7616 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7618 struct ice_aqc_vsi_props *vsi_props;
7619 struct ice_hw *hw = &vsi->back->hw;
7620 struct ice_vsi_ctx *ctxt;
7623 vsi_props = &vsi->info;
7625 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7629 ctxt->info = vsi->info;
7631 if (bmode == BRIDGE_MODE_VEB)
7632 /* change from VEPA to VEB mode */
7633 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7635 /* change from VEB to VEPA mode */
7636 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7637 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
7639 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7641 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
7642 bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
7645 /* Update sw flags for bookkeeping */
7646 vsi_props->sw_flags = ctxt->info.sw_flags;
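/* Background note (illustrative): ICE_AQ_VSI_SW_FLAG_ALLOW_LB enables local
 * loopback in the switching element, which VEB mode needs so VM-to-VM
 * traffic can be forwarded internally; clearing it (VEPA) forces that
 * traffic out to the adjacent external switch instead.
 */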
7654 * ice_bridge_setlink - Set the hardware bridge mode
7655 * @dev: the netdev being configured
7656 * @nlh: RTNL message
7657 * @flags: bridge setlink flags
7658 * @extack: netlink extended ack
7660 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
7661 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
7662 * not already set) for all VSIs connected to this switch, and also updates the
7663 * unicast switch filter rules for the corresponding switch of the netdev.
7666 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7667 u16 __always_unused flags,
7668 struct netlink_ext_ack __always_unused *extack)
7670 struct ice_netdev_priv *np = netdev_priv(dev);
7671 struct ice_pf *pf = np->vsi->back;
7672 struct nlattr *attr, *br_spec;
7673 struct ice_hw *hw = &pf->hw;
7674 struct ice_sw *pf_sw;
7675 int rem, v, err = 0;
7677 pf_sw = pf->first_sw;
7678 /* find the attribute in the netlink message */
7679 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7681 nla_for_each_nested(attr, br_spec, rem) {
7684 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7686 mode = nla_get_u16(attr);
7687 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7689 /* Continue if bridge mode is not being flipped */
7690 if (mode == pf_sw->bridge_mode)
7692 /* Iterates through the PF VSI list and update the loopback
7695 ice_for_each_vsi(pf, v) {
7698 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7703 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7704 /* Update the unicast switch filter rules for the corresponding
7705 * switch of the netdev
7707 err = ice_update_sw_rule_bridge_mode(hw);
7709 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
7711 ice_aq_str(hw->adminq.sq_last_status));
7712 /* revert hw->evb_veb */
7713 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7717 pf_sw->bridge_mode = mode;
7724 * ice_tx_timeout - Respond to a Tx Hang
7725 * @netdev: network interface device structure
7726 * @txqueue: Tx queue
7728 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
7730 struct ice_netdev_priv *np = netdev_priv(netdev);
7731 struct ice_tx_ring *tx_ring = NULL;
7732 struct ice_vsi *vsi = np->vsi;
7733 struct ice_pf *pf = vsi->back;
7736 pf->tx_timeout_count++;
7738 /* Check if PFC is enabled for the TC to which the queue belongs.
7739 * If so, the Tx timeout is not caused by a hung queue and there is no
7740 * need to reset and rebuild.
7742 if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7743 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7748 /* now that we have an index, find the tx_ring struct */
7749 ice_for_each_txq(vsi, i)
7750 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7751 if (txqueue == vsi->tx_rings[i]->q_index) {
7752 tx_ring = vsi->tx_rings[i];
7756 /* Reset recovery level if enough time has elapsed after last timeout.
7757 * Also ensure no new reset action happens before next timeout period.
7759 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7760 pf->tx_timeout_recovery_level = 1;
7761 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7762 netdev->watchdog_timeo)))
7766 struct ice_hw *hw = &pf->hw;
7769 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7770 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7771 /* Read interrupt register */
7772 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7774 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7775 vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7776 head, tx_ring->next_to_use, val);
7779 pf->tx_timeout_last_recovery = jiffies;
7780 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7781 pf->tx_timeout_recovery_level, txqueue);
7783 switch (pf->tx_timeout_recovery_level) {
7785 set_bit(ICE_PFR_REQ, pf->state);
7788 set_bit(ICE_CORER_REQ, pf->state);
7791 set_bit(ICE_GLOBR_REQ, pf->state);
7794 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7795 set_bit(ICE_DOWN, pf->state);
7796 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7797 set_bit(ICE_SERVICE_DIS, pf->state);
7801 ice_service_task_schedule(pf);
7802 pf->tx_timeout_recovery_level++;
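/* Recovery escalation at a glance (summary of the switch above): each
 * timeout within the 20 second window bumps the level and requests a
 * progressively wider reset:
 *   level 1 -> PF reset (PFR)
 *   level 2 -> core reset (CORER)
 *   level 3 -> global reset (GLOBR)
 *   beyond  -> give up: mark the device down and disable the service task
 */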
7806 * ice_setup_tc_cls_flower - flower classifier offloads
7807 * @np: net device to configure
7808 * @filter_dev: device on which filter is added
7809 * @cls_flower: offload data
7812 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
7813 struct net_device *filter_dev,
7814 struct flow_cls_offload *cls_flower)
7816 struct ice_vsi *vsi = np->vsi;
7818 if (cls_flower->common.chain_index)
7821 switch (cls_flower->command) {
7822 case FLOW_CLS_REPLACE:
7823 return ice_add_cls_flower(filter_dev, vsi, cls_flower);
7824 case FLOW_CLS_DESTROY:
7825 return ice_del_cls_flower(vsi, cls_flower);
7832 * ice_setup_tc_block_cb - callback handler registered for TC block
7833 * @type: TC SETUP type
7834 * @type_data: TC flower offload data that contains user input
7835 * @cb_priv: netdev private data
7838 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
7840 struct ice_netdev_priv *np = cb_priv;
7843 case TC_SETUP_CLSFLOWER:
7844 return ice_setup_tc_cls_flower(np, np->vsi->netdev,
7852 * ice_validate_mqprio_qopt - Validate TCF input parameters
7853 * @vsi: Pointer to VSI
7854 * @mqprio_qopt: input parameters for mqprio queue configuration
7856 * This function validates MQPRIO params, such as qcount (power of 2 wherever
7857 * needed), and makes sure the user doesn't specify a qcount or BW rate limit
7858 * for TCs beyond "num_tc"
7861 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
7862 struct tc_mqprio_qopt_offload *mqprio_qopt)
7864 u64 sum_max_rate = 0, sum_min_rate = 0;
7865 int non_power_of_2_qcount = 0;
7866 struct ice_pf *pf = vsi->back;
7867 int max_rss_q_cnt = 0;
7872 if (vsi->type != ICE_VSI_PF)
7875 if (mqprio_qopt->qopt.offset[0] != 0 ||
7876 mqprio_qopt->qopt.num_tc < 1 ||
7877 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
7880 dev = ice_pf_to_dev(pf);
7881 vsi->ch_rss_size = 0;
7882 num_tc = mqprio_qopt->qopt.num_tc;
7884 for (i = 0; num_tc; i++) {
7885 int qcount = mqprio_qopt->qopt.count[i];
7886 u64 max_rate, min_rate, rem;
7891 if (is_power_of_2(qcount)) {
7892 if (non_power_of_2_qcount &&
7893 qcount > non_power_of_2_qcount) {
7894 dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
7895 qcount, non_power_of_2_qcount);
7898 if (qcount > max_rss_q_cnt)
7899 max_rss_q_cnt = qcount;
7901 if (non_power_of_2_qcount &&
7902 qcount != non_power_of_2_qcount) {
7903 dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
7904 qcount, non_power_of_2_qcount);
7907 if (qcount < max_rss_q_cnt) {
7908 dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
7909 qcount, max_rss_q_cnt);
7912 max_rss_q_cnt = qcount;
7913 non_power_of_2_qcount = qcount;
7916 /* TC command takes input in K/M/Gbps or K/M/Gbit etc but
7917 * converts the bandwidth rate limit into Bytes/s when
7918 * passing it down to the driver. So convert input bandwidth
7919 * from Bytes/s to Kbps
7921 max_rate = mqprio_qopt->max_rate[i];
7922 max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
7923 sum_max_rate += max_rate;
7925 /* min_rate is minimum guaranteed rate and it can't be zero */
7926 min_rate = mqprio_qopt->min_rate[i];
7927 min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
7928 sum_min_rate += min_rate;
7930 if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
7931 dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
7932 min_rate, ICE_MIN_BW_LIMIT);
7936 iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
7938 dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
7939 i, ICE_MIN_BW_LIMIT);
7943 iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
7945 dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
7946 i, ICE_MIN_BW_LIMIT);
7950 /* min_rate can't be more than max_rate, except when max_rate
7951 * is zero (implies max_rate sought is max line rate). In such
7952 * a case min_rate can be more than max.
7954 if (max_rate && min_rate > max_rate) {
7955 dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
7956 min_rate, max_rate);
7960 if (i >= mqprio_qopt->qopt.num_tc - 1)
7962 if (mqprio_qopt->qopt.offset[i + 1] !=
7963 (mqprio_qopt->qopt.offset[i] + qcount))
7967 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7970 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7973 speed = ice_get_link_speed_kbps(vsi);
7974 if (sum_max_rate && sum_max_rate > (u64)speed) {
7975 dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
7976 sum_max_rate, speed);
7979 if (sum_min_rate && sum_min_rate > (u64)speed) {
7980 dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
7981 sum_min_rate, speed);
7985 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
7986 vsi->ch_rss_size = max_rss_q_cnt;
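/* Worked example (illustrative): the stack hands rates down in Bytes/s, so a
 * user-requested 1 Gbit/s arrives here as 125,000,000 B/s; dividing by the
 * Kbps divisor (assumed to be 125, i.e. * 8 / 1000) yields 1,000,000 Kbps,
 * the unit the scheduler calls above expect.
 */
static u64 example_bytes_per_sec_to_kbps(u64 bytes_per_sec)
{
	/* assumption: matches ICE_BW_KBPS_DIVISOR used by the driver */
	return div_u64(bytes_per_sec, 125);
}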
7992 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
7993 * @pf: ptr to PF device
7996 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
7998 struct device *dev = ice_pf_to_dev(pf);
8003 if (!(vsi->num_gfltr || vsi->num_bfltr))
8007 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8008 struct ice_fd_hw_prof *prof;
8012 if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8013 hw->fdir_prof[flow]->cnt))
8016 for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8017 enum ice_flow_priority prio;
8020 /* add this VSI to FDir profile for this flow */
8021 prio = ICE_FLOW_PRIO_NORMAL;
8022 prof = hw->fdir_prof[flow];
8023 prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
8024 status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
8025 prof->vsi_h[0], vsi->idx,
8026 prio, prof->fdir_seg[tun],
8029 dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8034 prof->entry_h[prof->cnt][tun] = entry_h;
8037 /* store VSI for filter replay and delete */
8038 prof->vsi_h[prof->cnt] = vsi->idx;
8042 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8047 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8053 * ice_add_channel - add a channel by adding VSI
8054 * @pf: ptr to PF device
8055 * @sw_id: underlying HW switching element ID
8056 * @ch: ptr to channel structure
8058 * Add a channel (VSI) using add_vsi and queue_map
8060 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8062 struct device *dev = ice_pf_to_dev(pf);
8063 struct ice_vsi *vsi;
8065 if (ch->type != ICE_VSI_CHNL) {
8066 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8070 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8071 if (!vsi || vsi->type != ICE_VSI_CHNL) {
8072 dev_err(dev, "create chnl VSI failure\n");
8076 ice_add_vsi_to_fdir(pf, vsi);
8079 ch->vsi_num = vsi->vsi_num;
8080 ch->info.mapping_flags = vsi->info.mapping_flags;
8082 /* set the back pointer of channel for newly created VSI */
8085 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8086 sizeof(vsi->info.q_mapping));
8087 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8088 sizeof(vsi->info.tc_mapping));
* ice_chnl_cfg_res - configure channel specific resources
8095 * @vsi: the VSI being setup
8096 * @ch: ptr to channel structure
8098 * Configure channel specific resources such as rings, vector.
8100 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8104 for (i = 0; i < ch->num_txq; i++) {
8105 struct ice_q_vector *tx_q_vector, *rx_q_vector;
8106 struct ice_ring_container *rc;
8107 struct ice_tx_ring *tx_ring;
8108 struct ice_rx_ring *rx_ring;
8110 tx_ring = vsi->tx_rings[ch->base_q + i];
8111 rx_ring = vsi->rx_rings[ch->base_q + i];
8112 if (!tx_ring || !rx_ring)
8115 /* setup ring being channel enabled */
8119 /* following code block sets up vector specific attributes */
8120 tx_q_vector = tx_ring->q_vector;
8121 rx_q_vector = rx_ring->q_vector;
8122 if (!tx_q_vector && !rx_q_vector)
8126 tx_q_vector->ch = ch;
8127 /* setup Tx and Rx ITR setting if DIM is off */
8128 rc = &tx_q_vector->tx;
8129 if (!ITR_IS_DYNAMIC(rc))
8130 ice_write_itr(rc, rc->itr_setting);
8133 rx_q_vector->ch = ch;
8134 /* setup Tx and Rx ITR setting if DIM is off */
8135 rc = &rx_q_vector->rx;
8136 if (!ITR_IS_DYNAMIC(rc))
8137 ice_write_itr(rc, rc->itr_setting);
8141 /* it is safe to assume that, if the channel has a non-zero num_txq/num_rxq,
8142 * then the GLINT_ITR register would have been written to perform an in-context
8143 * update; hence perform a flush.
8145 if (ch->num_txq || ch->num_rxq)
8146 ice_flush(&vsi->back->hw);
8150 * ice_cfg_chnl_all_res - configure channel resources
8151 * @vsi: ptr to main_vsi
8152 * @ch: ptr to channel structure
8154 * This function configures channel specific resources such as flow-director
8155 * counter index, and other resources such as queues, vectors, ITR settings
8158 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8160 /* configure channel (aka ADQ) resources such as queues, vectors,
8161 * ITR settings for channel specific vectors and anything else
8163 ice_chnl_cfg_res(vsi, ch);
8167 * ice_setup_hw_channel - setup new channel
8168 * @pf: ptr to PF device
8169 * @vsi: the VSI being setup
8170 * @ch: ptr to channel structure
8171 * @sw_id: underlying HW switching element ID
8172 * @type: type of channel to be created (VMDq2/VF)
8174 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8175 * and configures Tx rings accordingly
8178 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8179 struct ice_channel *ch, u16 sw_id, u8 type)
8181 struct device *dev = ice_pf_to_dev(pf);
8184 ch->base_q = vsi->next_base_q;
8187 ret = ice_add_channel(pf, sw_id, ch);
8189 dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8193 /* configure/setup ADQ specific resources */
8194 ice_cfg_chnl_all_res(vsi, ch);
8196 /* make sure to update the next_base_q so that subsequent channel's
8197 * (aka ADQ) VSI queue map is correct
8199 vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8200 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8207 * ice_setup_channel - setup new channel using uplink element
8208 * @pf: ptr to PF device
8209 * @vsi: the VSI being setup
8210 * @ch: ptr to channel structure
8212 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8213 * and uplink switching element
8216 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8217 struct ice_channel *ch)
8219 struct device *dev = ice_pf_to_dev(pf);
8223 if (vsi->type != ICE_VSI_PF) {
8224 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8228 sw_id = pf->first_sw->sw_id;
8230 /* create channel (VSI) */
8231 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8233 dev_err(dev, "failed to setup hw_channel\n");
8236 dev_dbg(dev, "successfully created channel()\n");
8238 return ch->ch_vsi ? true : false;
8242 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8243 * @vsi: VSI to be configured
8244 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8245 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8248 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8252 err = ice_set_min_bw_limit(vsi, min_tx_rate);
8256 return ice_set_max_bw_limit(vsi, max_tx_rate);
8260 * ice_create_q_channel - function to create channel
8261 * @vsi: VSI to be configured
8262 * @ch: ptr to channel (it contains channel specific params)
8264 * This function creates a channel (VSI) using the num_queues specified by the
8265 * user and reconfigures RSS if needed.
8267 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8269 struct ice_pf *pf = vsi->back;
8275 dev = ice_pf_to_dev(pf);
8276 if (!ch->num_txq || !ch->num_rxq) {
8277 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8281 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8282 dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8283 vsi->cnt_q_avail, ch->num_txq);
8287 if (!ice_setup_channel(pf, vsi, ch)) {
8288 dev_info(dev, "Failed to setup channel\n");
8291 /* configure BW rate limit */
8292 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8295 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8298 dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8299 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8301 dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8302 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8305 vsi->cnt_q_avail -= ch->num_txq;
8311 * ice_rem_all_chnl_fltrs - removes all channel filters
8312 * @pf: ptr to PF, TC-flower based filters are tracked at the PF level
8314 * Remove all advanced switch filters only if they are channel-specific
8315 * tc-flower based filters
8317 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8319 struct ice_tc_flower_fltr *fltr;
8320 struct hlist_node *node;
8322 /* to remove all channel filters, iterate an ordered list of filters */
8323 hlist_for_each_entry_safe(fltr, node,
8324 &pf->tc_flower_fltr_list,
8326 struct ice_rule_query_data rule;
8329 /* for now process only channel specific filters */
8330 if (!ice_is_chnl_fltr(fltr))
8333 rule.rid = fltr->rid;
8334 rule.rule_id = fltr->rule_id;
8335 rule.vsi_handle = fltr->dest_vsi_handle;
8336 status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8338 if (status == -ENOENT)
8339 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8342 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8344 } else if (fltr->dest_vsi) {
8345 /* update advanced switch filter count */
8346 if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8347 u32 flags = fltr->flags;
8349 fltr->dest_vsi->num_chnl_fltr--;
8350 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8351 ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8352 pf->num_dmac_chnl_fltrs--;
8356 hlist_del(&fltr->tc_flower_node);
8362 * ice_remove_q_channels - Remove queue channels for the TCs
8363 * @vsi: VSI to be configured
8364 * @rem_fltr: delete advanced switch filter or not
8366 * Remove queue channels for the TCs
8368 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8370 struct ice_channel *ch, *ch_tmp;
8371 struct ice_pf *pf = vsi->back;
8374 /* remove all tc-flower based filters if they are channel filters only */
8376 ice_rem_all_chnl_fltrs(pf);
8378 /* remove ntuple filters since queue configuration is being changed */
8379 if (vsi->netdev->features & NETIF_F_NTUPLE) {
8380 struct ice_hw *hw = &pf->hw;
8382 mutex_lock(&hw->fdir_fltr_lock);
8383 ice_fdir_del_all_fltrs(vsi);
8384 mutex_unlock(&hw->fdir_fltr_lock);
8387 /* perform cleanup for channels if they exist */
8388 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8389 struct ice_vsi *ch_vsi;
8391 list_del(&ch->list);
8392 ch_vsi = ch->ch_vsi;
8398 /* Reset queue contexts */
8399 for (i = 0; i < ch->num_rxq; i++) {
8400 struct ice_tx_ring *tx_ring;
8401 struct ice_rx_ring *rx_ring;
8403 tx_ring = vsi->tx_rings[ch->base_q + i];
8404 rx_ring = vsi->rx_rings[ch->base_q + i];
8407 if (tx_ring->q_vector)
8408 tx_ring->q_vector->ch = NULL;
8412 if (rx_ring->q_vector)
8413 rx_ring->q_vector->ch = NULL;
8417 /* Release FD resources for the channel VSI */
8418 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8420 /* clear the VSI from scheduler tree */
8421 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8423 /* Delete VSI from FW */
8424 ice_vsi_delete(ch->ch_vsi);
8426 /* Delete VSI from PF and HW VSI arrays */
8427 ice_vsi_clear(ch->ch_vsi);
8429 /* free the channel */
8433 /* clear the channel VSI map which is stored in main VSI */
8434 ice_for_each_chnl_tc(i)
8435 vsi->tc_map_vsi[i] = NULL;
8437 /* reset all of the main VSI's TC information */
8443 * ice_rebuild_channels - rebuild channels
 * @pf: ptr to PF
8446 * Recreate channel VSIs and replay filters
8448 static int ice_rebuild_channels(struct ice_pf *pf)
8450 struct device *dev = ice_pf_to_dev(pf);
8451 struct ice_vsi *main_vsi;
8452 bool rem_adv_fltr = true;
8453 struct ice_channel *ch;
8454 struct ice_vsi *vsi;
8458 main_vsi = ice_get_main_vsi(pf);
8462 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8463 main_vsi->old_numtc == 1)
8464 return 0; /* nothing to be done */
8466 /* reconfigure main VSI based on old value of TC and cached values for MQPRIO opt */
8469 err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8471 dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8472 main_vsi->old_ena_tc, main_vsi->vsi_num);
8476 /* rebuild ADQ VSIs */
8477 ice_for_each_vsi(pf, i) {
8478 enum ice_vsi_type type;
8481 if (!vsi || vsi->type != ICE_VSI_CHNL)
8486 /* rebuild ADQ VSI */
8487 err = ice_vsi_rebuild(vsi, true);
8489 dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8490 ice_vsi_type_str(type), vsi->idx, err);
8494 /* Re-map HW VSI number, using VSI handle that has been
8495 * previously validated in ice_replay_vsi() call above
8497 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8499 /* replay filters for the VSI */
8500 err = ice_replay_vsi(&pf->hw, vsi->idx);
8502 dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8503 ice_vsi_type_str(type), err, vsi->idx);
8504 rem_adv_fltr = false;
8507 dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
8508 ice_vsi_type_str(type), vsi->idx);
8510 /* store ADQ VSI at correct TC index in main VSI's map of ADQ VSIs */
8513 main_vsi->tc_map_vsi[tc_idx++] = vsi;
8516 /* ADQ VSI(s) have been rebuilt successfully, so set up the
8517 * channels for the main VSI's Tx and Rx rings
8519 list_for_each_entry(ch, &main_vsi->ch_list, list) {
8520 struct ice_vsi *ch_vsi;
8522 ch_vsi = ch->ch_vsi;
8526 /* reconfig channel resources */
8527 ice_cfg_chnl_all_res(main_vsi, ch);
8529 /* replay BW rate limit if it is non-zero */
8530 if (!ch->max_tx_rate && !ch->min_tx_rate)
8533 err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8536 dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8537 err, ch->max_tx_rate, ch->min_tx_rate,
8540 dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8541 ch->max_tx_rate, ch->min_tx_rate,
8545 /* reconfig RSS for main VSI */
8546 if (main_vsi->ch_rss_size)
8547 ice_vsi_cfg_rss_lut_key(main_vsi);
8552 ice_remove_q_channels(main_vsi, rem_adv_fltr);
8557 * ice_create_q_channels - Add queue channel for the given TCs
8558 * @vsi: VSI to be configured
8560 * Configures queue channel mapping to the given TCs
8562 static int ice_create_q_channels(struct ice_vsi *vsi)
8564 struct ice_pf *pf = vsi->back;
8565 struct ice_channel *ch;
8568 ice_for_each_chnl_tc(i) {
8569 if (!(vsi->all_enatc & BIT(i)))
8572 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8577 INIT_LIST_HEAD(&ch->list);
8578 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8579 ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8580 ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8581 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8582 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8584 /* convert to Kbits/s */
8585 if (ch->max_tx_rate)
8586 ch->max_tx_rate = div_u64(ch->max_tx_rate,
8587 ICE_BW_KBPS_DIVISOR);
8588 if (ch->min_tx_rate)
8589 ch->min_tx_rate = div_u64(ch->min_tx_rate,
8590 ICE_BW_KBPS_DIVISOR);
8592 ret = ice_create_q_channel(vsi, ch);
8594 dev_err(ice_pf_to_dev(pf),
8595 "failed creating channel TC:%d\n", i);
8599 list_add_tail(&ch->list, &vsi->ch_list);
8600 vsi->tc_map_vsi[i] = ch->ch_vsi;
8601 dev_dbg(ice_pf_to_dev(pf),
8602 "successfully created channel: VSI %pK\n", ch->ch_vsi);
8607 ice_remove_q_channels(vsi, false);
8613 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
8614 * @netdev: net device to configure
8615 * @type_data: TC offload data
8617 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
8619 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8620 struct ice_netdev_priv *np = netdev_priv(netdev);
8621 struct ice_vsi *vsi = np->vsi;
8622 struct ice_pf *pf = vsi->back;
8623 u16 mode, ena_tc_qdisc = 0;
8624 int cur_txq, cur_rxq;
8629 dev = ice_pf_to_dev(pf);
8630 num_tcf = mqprio_qopt->qopt.num_tc;
8631 hw = mqprio_qopt->qopt.hw;
8632 mode = mqprio_qopt->mode;
8634 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8635 vsi->ch_rss_size = 0;
8636 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8640 /* Generate queue region map for number of TCF requested */
8641 for (i = 0; i < num_tcf; i++)
8642 ena_tc_qdisc |= BIT(i);
8645 case TC_MQPRIO_MODE_CHANNEL:
8647 if (pf->hw.port_info->is_custom_tx_enabled) {
8648 dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
8651 ice_tear_down_devlink_rate_tree(pf);
8653 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8655 netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
8659 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8660 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8661 /* don't assume state of hw_tc_offload during driver load
8662 * and set the flag for TC flower filter if hw_tc_offload is available
8665 if (vsi->netdev->features & NETIF_F_HW_TC)
8666 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8674 /* Requesting same TCF configuration as already enabled */
8675 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8676 mode != TC_MQPRIO_MODE_CHANNEL)
8679 /* Pause VSI queues */
8680 ice_dis_vsi(vsi, true);
8682 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
8683 ice_remove_q_channels(vsi, true);
8685 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8686 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
8688 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
8691 /* logic to rebuild VSI, same as ethtool -L */
8692 u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
8694 for (i = 0; i < num_tcf; i++) {
8695 if (!(ena_tc_qdisc & BIT(i)))
8698 offset = vsi->mqprio_qopt.qopt.offset[i];
8699 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
8700 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
8702 vsi->req_txq = offset + qcount_tx;
8703 vsi->req_rxq = offset + qcount_rx;
8705 /* store away original rss_size info, so that it gets reused
8706 * from ice_vsi_rebuild during the tc-qdisc delete stage, to
8707 * determine what the rss_size for the main VSI should be
8709 vsi->orig_rss_size = vsi->rss_size;
8712 /* save current values of Tx and Rx queues before calling VSI rebuild
8713 * for fallback option
8715 cur_txq = vsi->num_txq;
8716 cur_rxq = vsi->num_rxq;
8718 /* proceed with rebuilding the main VSI using the correct number of queues */
8719 ret = ice_vsi_rebuild(vsi, false);
8721 /* fallback to current number of queues */
8722 dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
8723 vsi->req_txq = cur_txq;
8724 vsi->req_rxq = cur_rxq;
8725 clear_bit(ICE_RESET_FAILED, pf->state);
8726 if (ice_vsi_rebuild(vsi, false)) {
8727 dev_err(dev, "Rebuild of main VSI failed again\n");
8732 vsi->all_numtc = num_tcf;
8733 vsi->all_enatc = ena_tc_qdisc;
8734 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
8736 netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
8741 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8742 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8743 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
8745 /* set TC0 rate limit if specified */
8746 if (max_tx_rate || min_tx_rate) {
8747 /* convert to Kbits/s */
8749 max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
8751 min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
8753 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
8755 dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
8756 max_tx_rate, min_tx_rate, vsi->vsi_num);
8758 dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
8759 max_tx_rate, min_tx_rate, vsi->vsi_num);
8763 ret = ice_create_q_channels(vsi);
8765 netdev_err(netdev, "failed configuring queue channels\n");
8768 netdev_dbg(netdev, "successfully configured channels\n");
8772 if (vsi->ch_rss_size)
8773 ice_vsi_cfg_rss_lut_key(vsi);
8776 /* if error, reset the all_numtc and all_enatc */
8782 ice_ena_vsi(vsi, true);
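/* Example (illustrative; syntax as in Intel's ADQ documentation, adjust for
 * your setup): a userspace request that reaches this handler via
 * TC_SETUP_QDISC_MQPRIO in channel mode might look like:
 *
 *   tc qdisc add dev ens801 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *      queues 4@0 4@4 hw 1 mode channel shaper bw_rlimit \
 *      max_rate 1Gbit 2Gbit
 */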
static LIST_HEAD(ice_block_cb_list);

static int
ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
	     void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	int err;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &ice_block_cb_list,
						  ice_setup_tc_block_cb,
						  np, np, true);
	case TC_SETUP_QDISC_MQPRIO:
		/* setup traffic classifier for receive side */
		mutex_lock(&pf->tc_mutex);
		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
		mutex_unlock(&pf->tc_mutex);
		return err;
	default:
		return -EOPNOTSUPP;
	}
	return -EOPNOTSUPP;
}
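/* Illustrative usage (editorial, not part of the driver): TC_SETUP_BLOCK is
 * the path flower filters take once the hw-tc-offload feature is enabled.
 * The interface name and match values below are hypothetical:
 *
 *	ethtool -K eth0 hw-tc-offload on
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress flower ip_proto tcp dst_port 80 \
 *		skip_sw hw_tc 1
 */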
static struct ice_indr_block_priv *
ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
			   struct net_device *netdev)
{
	struct ice_indr_block_priv *cb_priv;

	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
		if (!cb_priv->netdev)
			return NULL;
		if (cb_priv->netdev == netdev)
			return cb_priv;
	}
	return NULL;
}
static int
ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
			void *indr_priv)
{
	struct ice_indr_block_priv *priv = indr_priv;
	struct ice_netdev_priv *np = priv->np;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_setup_tc_cls_flower(np, priv->netdev,
					       (struct flow_cls_offload *)
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static int
ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
			struct ice_netdev_priv *np,
			struct flow_block_offload *f, void *data,
			void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct ice_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (!ice_is_tunnel_supported(netdev) &&
	    !(is_vlan_dev(netdev) &&
	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->np = np;
		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);

		block_cb =
			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
						 indr_priv, indr_priv,
						 ice_rep_indr_tc_block_unbind,
						 f, netdev, sch, data, np,
						 cleanup);

		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
		break;
	case FLOW_BLOCK_UNBIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						ice_indr_setup_block_cb,
						indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);

		list_del(&block_cb->driver_list);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb))
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
					       data, cleanup);
	default:
		return -EOPNOTSUPP;
	}
}
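/* Illustrative usage (editorial, not part of the driver): the indirect block
 * machinery above handles filters installed on netdevs this driver does not
 * own, e.g. a VXLAN device stacked on the uplink. Names and IDs below are
 * hypothetical:
 *
 *	ip link add vxlan0 type vxlan id 42 dstport 4789 dev eth0
 *	tc qdisc add dev vxlan0 clsact
 *	tc filter add dev vxlan0 ingress flower enc_key_id 42 \
 *		enc_dst_port 4789 action drop
 *
 * FLOW_BLOCK_BIND allocates one ice_indr_block_priv per foreign netdev;
 * FLOW_BLOCK_UNBIND looks it up again and tears down the flow_block_cb.
 */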
/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't open net device while reset is in progress");
		return -EBUSY;
	}

	return ice_open_internal(netdev);
}
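/* Illustrative usage (editorial, not part of the driver): ice_open is
 * reached through the .ndo_open hook when the interface is brought up; the
 * interface name is hypothetical:
 *
 *	ip link set dev eth0 up
 */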
/**
 * ice_open_internal - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * Internal ice_open implementation. Should not be called directly except
 * from ice_open and the reset handling routine.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open_internal(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_port_info *pi;
	int err;

	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	netif_carrier_off(netdev);

	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err) {
		netdev_err(netdev, "Failed to get link info, error %d\n", err);
		return err;
	}

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Set PHY if there is media; otherwise turn off PHY */
	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
			err = ice_init_phy_user_cfg(pi);
			if (err) {
				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
					   err);
				return err;
			}
		}

		err = ice_configure_phy(vsi);
		if (err) {
			netdev_err(netdev, "Failed to set physical link up, error %d\n",
				   err);
			return err;
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	/* Update existing tunnels information */
	udp_tunnel_get_rx_info(netdev);

	return err;
}
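/* Note (editorial): udp_tunnel_get_rx_info() asks the stack to replay its
 * UDP tunnel port table, so VXLAN/GENEVE ports offloaded before the
 * interface was closed are re-programmed on reopen. A hypothetical way to
 * exercise this path from userspace:
 *
 *	ip link add vxlan0 type vxlan id 100 dstport 4789 dev eth0
 *	ip link set dev eth0 down && ip link set dev eth0 up
 */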
/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns success only - not allowed to fail
 */
int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't stop net device while reset is in progress");
		return -EBUSY;
	}

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
		int link_err = ice_force_phys_link_state(vsi, false);

		if (link_err)
			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
				   vsi->vsi_num, link_err);
	}

	ice_vsi_close(vsi);

	return 0;
}
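/* Illustrative usage (editorial, not part of the driver): whether ice_stop
 * also forces the physical link down is controlled by the private flag
 * tested above; assuming the standard ice ethtool naming, it can be
 * toggled from userspace before closing the interface:
 *
 *	ethtool --set-priv-flags eth0 link-down-on-close on
 *	ip link set dev eth0 down
 */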
/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	bool gso = skb_is_gso(skb);
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
		features &= ~NETIF_F_GSO_MASK;

	len = skb_network_offset(skb);
	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
		goto out_rm_features;

	len = skb_network_header_len(skb);
	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
		goto out_rm_features;

	if (skb->encapsulation) {
		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
		 * the case of IPIP frames, the transport header pointer is
		 * after the inner header! So check to make sure that this
		 * is a GRE or UDP_TUNNEL frame before doing that math.
		 */
		if (gso && (skb_shinfo(skb)->gso_type &
			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
			len = skb_inner_network_header(skb) -
			      skb_transport_header(skb);
			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
				goto out_rm_features;
		}

		len = skb_inner_network_header_len(skb);
		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
			goto out_rm_features;
	}

	return features;

out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
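/* Worked example (editorial): a GSO skb with gso_size of 32 falls below
 * ICE_TXD_CTX_MIN_MSS (64 bytes, per the comment above), so NETIF_F_GSO_MASK
 * is cleared and that one frame is segmented in software, while checksum
 * offload is kept as long as the header-length checks pass. All checks here
 * are per-skb and never alter the netdev's advertised feature set.
 */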
static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};
static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_select_queue = ice_select_queue,
	.ndo_features_check = ice_features_check,
	.ndo_fix_features = ice_fix_features,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
	.ndo_eth_ioctl = ice_eth_ioctl,
	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
	.ndo_set_vf_mac = ice_set_vf_mac,
	.ndo_get_vf_config = ice_get_vf_cfg,
	.ndo_set_vf_trust = ice_set_vf_trust,
	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
	.ndo_set_vf_link_state = ice_set_vf_link_state,
	.ndo_get_vf_stats = ice_get_vf_stats,
	.ndo_set_vf_rate = ice_set_vf_bw,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_setup_tc = ice_setup_tc,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ice_rx_flow_steer,
#endif
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp,
	.ndo_xdp_xmit = ice_xdp_xmit,
	.ndo_xsk_wakeup = ice_xsk_wakeup,
};
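/* A minimal sketch (editorial, not verbatim from this file) of how the two
 * ops tables above are expected to be selected when the netdev is created.
 * The helper name ice_set_netdev_ops is hypothetical; ice_is_safe_mode() is
 * the driver's real safe-mode predicate:
 *
 *	static void ice_set_netdev_ops(struct ice_pf *pf,
 *				       struct net_device *netdev)
 *	{
 *		if (ice_is_safe_mode(pf))
 *			netdev->netdev_ops = &ice_netdev_safe_mode_ops;
 *		else
 *			netdev->netdev_ops = &ice_netdev_ops;
 *	}
 *
 * In safe mode (DDP package failed to load) only basic hooks are kept, which
 * is why the safe-mode table above omits the TC and VF callbacks and wires
 * .ndo_bpf to a stub that rejects XDP programs.
 */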