// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
#include <net/xdp_sock_drv.h>

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);
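
/* A minimal usage sketch for the XDP locking static key defined above; the
 * jump-label API shown (static_branch_unlikely()/static_branch_inc()) is the
 * standard <linux/jump_label.h> interface, and the tx_lock usage is only
 * illustrative:
 *
 *	if (static_branch_unlikely(&ice_xdp_locking_key))
 *		spin_lock(&xdp_ring->tx_lock);
 *
 * The branch is patched to a no-op while the key is disabled, so the Tx hot
 * path pays nothing unless XDP rings are shared between CPUs and the key has
 * been enabled (e.g. via static_branch_inc()).
 */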
/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */
struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}

static struct workqueue_struct *ice_wq;
struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

/**
 * netif_is_ice - check if a given netdevice is an ice-driven netdevice
 * @dev: net_device to check
 */
bool netif_is_ice(const struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
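
/* Worked example of the wraparound arithmetic above (made-up numbers): on a
 * 512-descriptor ring with next_to_clean == 500 and next_to_use == 10, head
 * is greater than tail, so the pending count is 10 + 512 - 500 = 22
 * descriptors that have been queued but not yet cleaned.
 */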
/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_mb();
			ring_stats->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}
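
/* A minimal sketch of how the two callbacks above are wired up; this mirrors
 * the call site in ice_vsi_sync_fltr() below and assumes the standard net
 * core contract for __dev_uc_sync()/__dev_mc_sync():
 *
 *	netif_addr_lock_bh(netdev);
 *	__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 *	__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 *	netif_addr_unlock_bh(netdev);
 *
 * The net core calls the first function for each address newly added to the
 * netdev and the second for each address removed, so the driver only has to
 * turn those deltas into entries on tmp_sync_list/tmp_unsync_list.
 */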
/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}

/**
 * ice_set_promisc - Enable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	} else {
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						  promisc_m, 0);
	}
	if (status && status != -EEXIST)
		return status;

	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return 0;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
							 promisc_m);
	} else {
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						    promisc_m, 0);
	}

	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return status;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If filter is added successfully or already exists, do not go into
	 * 'if' condition and report it as error. Instead continue processing
	 * rest of the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC, vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);

				/* promiscuous mode implies allmulticast so
				 * that VSIs that are in promiscuous mode are
				 * subscribed to multicast packets coming to
				 * the port
				 */
				err = ice_set_promisc(vsi,
						      ICE_MCAST_PROMISC_BITS);
				if (err)
					goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}

			/* disable allmulti here, but only if allmulti is not
			 * still enabled for the netdev
			 */
			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
				err = ice_clear_promisc(vsi,
							ICE_MCAST_PROMISC_BITS);
				if (err)
					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
						   err, vsi->vsi_num);
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_clear_sw_switch_recipes - clear switch recipes
 * @pf: board private structure
 *
 * Mark switch recipes as not created in sw structures. There are cases where
 * rules (especially advanced rules) need to be restored, either re-read from
 * hardware or added again, for example after a reset. The 'recp_created' flag
 * prevents that from happening and needs to be cleared upfront.
 */
static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
{
	struct ice_sw_recipe *recp;
	u8 i;

	recp = pf->hw.switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		recp[i].recp_created = false;
}

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on safe side, reset orig_rss_size so that normal flow
	 * of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset type, do not support channel rebuild
			 * hence reset needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->rss_size = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
		reset_type = ICE_RESET_CORER;
	}

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set,
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state)) {
		reset_type = ICE_RESET_PFR;
		if (pf->lag && pf->lag->bonded) {
			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
			reset_type = ICE_RESET_CORER;
		}
	}
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel(R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *an;
	const char *fec;
	const char *fc;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message and continue on with driver rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf = tlv->tlvinfo;
	buf[0] = 0x08;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}
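
/* A worked example of the typelen encoding used above (a sketch; the values
 * follow the IEEE 802.1AB TLV header layout, with the type in the top 7 bits
 * and the length in the low 9 bits, and assume ICE_TLV_TYPE_ORG == 127 and
 * ICE_IEEE_ETS_TLV_LEN == 25):
 *
 *	typelen = (127 << 9) | 25;	// 0xFE19
 *	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;  // 25
 *
 * offset then advances by len + 2 for each TLV because the 2-byte TLV header
 * is not counted in len.
 */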
/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}

/**
 * ice_check_module_power - check module power level
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other book keeping needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

/**
 * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: intermediate helper storage and identifier for waiting
 * @opcode: the opcode to wait for
 *
 * Prepares to wait for a specific AdminQ completion event on the ARQ for
 * a given PF. Actual wait would be done by a call to ice_aq_wait_for_event().
 *
 * Calls are separated to allow caller registering for event before sending
 * the command, which mitigates a race between registering and FW responding.
 *
 * To obtain only the descriptor contents, pass a task->event with null
 * msg_buf. If the complete data buffer is desired, allocate the
 * task->event.msg_buf with enough space ahead of time.
 */
void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode)
{
	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);
}

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: ptr prepared by ice_aq_prep_for_event()
 * @timeout: how long to wait, in jiffies
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout)
{
	enum ice_aq_task_state *state = &task->state;
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long start = jiffies;
	long ret;
	int err;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
					       *state != ICE_AQ_TASK_WAITING,
					       timeout);
	switch (*state) {
	case ICE_AQ_TASK_NOT_PREPARED:
		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
		err = -EINVAL;
		break;
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", *state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		task->opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);

	return err;
}
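
/* A minimal usage sketch of the prep/wait pair above (hypothetical caller;
 * the opcode, command, and timeout are illustrative only):
 *
 *	struct ice_aq_task task;
 *	int err;
 *
 *	ice_aq_prep_for_event(pf, &task, opcode);
 *	err = <send the AdminQ command here>;
 *	if (!err)
 *		err = ice_aq_wait_for_event(pf, &task, HZ);
 *
 * Registering before sending is what closes the race described above: if
 * firmware responds before the caller sleeps, the completion is already
 * recorded in task->state and the wait returns immediately.
 */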
/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_rq_event_info *task_ev;
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state != ICE_AQ_TASK_WAITING)
			continue;
		if (task->opcode != opcode)
			continue;

		task_ev = &task->event;
		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
		task_ev->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
			memcpy(task_ev->msg_buf, event->msg_buf,
			       event->buf_len);
			task_ev->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

#define ICE_MBX_OVERFLOW_WATERMARK 64

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		struct ice_mbx_data data = {};
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			data.num_msg_proc = i;
			data.num_pending_arq = pending;
			data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
			data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;

			ice_vc_process_vf_msg(pf, &event, &data);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}
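
/* Example of the check above (made-up values): if hardware has advanced the
 * receive queue head register to 7 (ntu) while the driver has only cleaned
 * descriptors up to index 5 (next_to_clean), the two differ and the function
 * reports pending work; once the driver catches up, next_to_clean == ntu and
 * it returns false.
 */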
1569 * ice_clean_adminq_subtask - clean the AdminQ rings
1570 * @pf: board private structure
1572 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1574 struct ice_hw *hw = &pf->hw;
1576 if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1579 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1582 clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1584 /* There might be a situation where new messages arrive to a control
1585 * queue between processing the last message and clearing the
1586 * EVENT_PENDING bit. So before exiting, check queue head again (using
1587 * ice_ctrlq_pending) and process new messages if any.
1589 if (ice_ctrlq_pending(hw, &hw->adminq))
1590 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1596 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1597 * @pf: board private structure
1599 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1601 struct ice_hw *hw = &pf->hw;
1603 if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1606 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1609 clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1611 if (ice_ctrlq_pending(hw, &hw->mailboxq))
1612 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1618 * ice_clean_sbq_subtask - clean the Sideband Queue rings
1619 * @pf: board private structure
1621 static void ice_clean_sbq_subtask(struct ice_pf *pf)
1623 struct ice_hw *hw = &pf->hw;
1625 /* Nothing to do here if sideband queue is not supported */
1626 if (!ice_is_sbq_supported(hw)) {
1627 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1631 if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1634 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1637 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1639 if (ice_ctrlq_pending(hw, &hw->sbq))
1640 __ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1646 * ice_service_task_schedule - schedule the service task to wake up
1647 * @pf: board private structure
1649 * If not already scheduled, this puts the task into the work queue.
1651 void ice_service_task_schedule(struct ice_pf *pf)
1653 if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1654 !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1655 !test_bit(ICE_NEEDS_RESTART, pf->state))
1656 queue_work(ice_wq, &pf->serv_task);
1660 * ice_service_task_complete - finish up the service task
1661 * @pf: board private structure
1663 static void ice_service_task_complete(struct ice_pf *pf)
1665 WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1667 /* force memory (pf->state) to sync before next service task */
1668 smp_mb__before_atomic();
1669 clear_bit(ICE_SERVICE_SCHED, pf->state);
1673 * ice_service_task_stop - stop service task and cancel works
1674 * @pf: board private structure
1676 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1679 static int ice_service_task_stop(struct ice_pf *pf)
1683 ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1685 if (pf->serv_tmr.function)
1686 del_timer_sync(&pf->serv_tmr);
1687 if (pf->serv_task.func)
1688 cancel_work_sync(&pf->serv_task);
1690 clear_bit(ICE_SERVICE_SCHED, pf->state);
1695 * ice_service_task_restart - restart service task and schedule works
1696 * @pf: board private structure
1698 * This function is needed for suspend and resume works (e.g WoL scenario)
1700 static void ice_service_task_restart(struct ice_pf *pf)
1702 clear_bit(ICE_SERVICE_DIS, pf->state);
1703 ice_service_task_schedule(pf);
1707 * ice_service_timer - timer callback to schedule service task
1708 * @t: pointer to timer_list
1710 static void ice_service_timer(struct timer_list *t)
1712 struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1714 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1715 ice_service_task_schedule(pf);
1719 * ice_handle_mdd_event - handle malicious driver detect event
1720 * @pf: pointer to the PF structure
1722 * Called from service task. OICR interrupt handler indicates MDD event.
1723 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1724 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1725 * disable the queue, the PF can be configured to reset the VF using ethtool
1726 * private flag mdd-auto-reset-vf.
1728 static void ice_handle_mdd_event(struct ice_pf *pf)
1730 struct device *dev = ice_pf_to_dev(pf);
1731 struct ice_hw *hw = &pf->hw;
1736 if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1737 /* Since the VF MDD event logging is rate limited, check if
1738 * there are pending MDD events.
1740 ice_print_vfs_mdd_events(pf);
1744 /* find what triggered an MDD event */
1745 reg = rd32(hw, GL_MDET_TX_PQM);
1746 if (reg & GL_MDET_TX_PQM_VALID_M) {
1747 u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1748 GL_MDET_TX_PQM_PF_NUM_S;
1749 u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1750 GL_MDET_TX_PQM_VF_NUM_S;
1751 u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1752 GL_MDET_TX_PQM_MAL_TYPE_S;
1753 u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1754 GL_MDET_TX_PQM_QNUM_S);
1756 if (netif_msg_tx_err(pf))
1757 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1758 event, queue, pf_num, vf_num);
1759 wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1762 reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
1763 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1764 u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1765 GL_MDET_TX_TCLAN_PF_NUM_S;
1766 u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1767 GL_MDET_TX_TCLAN_VF_NUM_S;
1768 u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1769 GL_MDET_TX_TCLAN_MAL_TYPE_S;
1770 u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1771 GL_MDET_TX_TCLAN_QNUM_S);
1773 if (netif_msg_tx_err(pf))
1774 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1775 event, queue, pf_num, vf_num);
1776 wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
1779 reg = rd32(hw, GL_MDET_RX);
1780 if (reg & GL_MDET_RX_VALID_M) {
1781 u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1782 GL_MDET_RX_PF_NUM_S;
1783 u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1784 GL_MDET_RX_VF_NUM_S;
1785 u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1786 GL_MDET_RX_MAL_TYPE_S;
1787 u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1790 if (netif_msg_rx_err(pf))
1791 dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1792 event, queue, pf_num, vf_num);
1793 wr32(hw, GL_MDET_RX, 0xffffffff);
1796 /* check to see if this PF caused an MDD event */
1797 reg = rd32(hw, PF_MDET_TX_PQM);
1798 if (reg & PF_MDET_TX_PQM_VALID_M) {
1799 wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1800 if (netif_msg_tx_err(pf))
1801 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1804 reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw));
1805 if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1806 wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff);
1807 if (netif_msg_tx_err(pf))
1808 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1811 reg = rd32(hw, PF_MDET_RX);
1812 if (reg & PF_MDET_RX_VALID_M) {
1813 wr32(hw, PF_MDET_RX, 0xFFFF);
1814 if (netif_msg_rx_err(pf))
1815 dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1818 /* Check to see if one of the VFs caused an MDD event, and then
1819 * increment counters and set print pending
1821 mutex_lock(&pf->vfs.table_lock);
1822 ice_for_each_vf(pf, bkt, vf) {
1823 reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
1824 if (reg & VP_MDET_TX_PQM_VALID_M) {
1825 wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
1826 vf->mdd_tx_events.count++;
1827 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1828 if (netif_msg_tx_err(pf))
1829 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1833 reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
1834 if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1835 wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
1836 vf->mdd_tx_events.count++;
1837 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1838 if (netif_msg_tx_err(pf))
1839 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1843 reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
1844 if (reg & VP_MDET_TX_TDPU_VALID_M) {
1845 wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
1846 vf->mdd_tx_events.count++;
1847 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1848 if (netif_msg_tx_err(pf))
1849 dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1853 reg = rd32(hw, VP_MDET_RX(vf->vf_id));
1854 if (reg & VP_MDET_RX_VALID_M) {
1855 wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
1856 vf->mdd_rx_events.count++;
1857 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1858 if (netif_msg_rx_err(pf))
1859 dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1862 /* Since the queue is disabled on VF Rx MDD events, the
1863 * PF can be configured to reset the VF through ethtool
1864 * private flag mdd-auto-reset-vf.
1866 if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1867 /* VF MDD event counters will be cleared by
1868 * reset, so print the event prior to reset.
1870 ice_print_vf_rx_mdd_event(vf);
1871 ice_reset_vf(vf, ICE_VF_RESET_LOCK);
1875 mutex_unlock(&pf->vfs.table_lock);
1877 ice_print_vfs_mdd_events(pf);
1881 * ice_force_phys_link_state - Force the physical link state
1882 * @vsi: VSI to force the physical link state to up/down
1883 * @link_up: true/false indicates to set the physical link to up/down
1885 * Force the physical link state by getting the current PHY capabilities from
1886 * hardware and setting the PHY config based on the determined capabilities. If
1887 * link changes a link event will be triggered because both the Enable Automatic
1888 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1890 * Returns 0 on success, negative on failure
1892 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1894 struct ice_aqc_get_phy_caps_data *pcaps;
1895 struct ice_aqc_set_phy_cfg_data *cfg;
1896 struct ice_port_info *pi;
1900 if (!vsi || !vsi->port_info || !vsi->back)
1902 if (vsi->type != ICE_VSI_PF)
1905 dev = ice_pf_to_dev(vsi->back);
1907 pi = vsi->port_info;
1909 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1913 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1916 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1917 vsi->vsi_num, retcode);
1922 /* No change in link */
1923 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1924 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1927 /* Use the current user PHY configuration. The current user PHY
1928 * configuration is initialized during probe from PHY capabilities
1929 * software mode, and updated on set PHY configuration.
1931 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1937 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1939 cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1941 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1943 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1945 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1946 vsi->vsi_num, retcode);
1957 * ice_init_nvm_phy_type - Initialize the NVM PHY type
1958 * @pi: port info structure
1960 * Initialize nvm_phy_type_[low|high] for link lenient mode support
1962 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1964 struct ice_aqc_get_phy_caps_data *pcaps;
1965 struct ice_pf *pf = pi->hw->back;
1968 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1972 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
1976 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1980 pf->nvm_phy_type_hi = pcaps->phy_type_high;
1981 pf->nvm_phy_type_lo = pcaps->phy_type_low;
1989 * ice_init_link_dflt_override - Initialize link default override
1990 * @pi: port info structure
1992 * Initialize link default override and PHY total port shutdown during probe
1994 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1996 struct ice_link_default_override_tlv *ldo;
1997 struct ice_pf *pf = pi->hw->back;
1999 ldo = &pf->link_dflt_override;
2000 if (ice_get_link_default_override(ldo, pi))
2003 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
2006 /* Enable Total Port Shutdown (override/replace link-down-on-close
2007 * ethtool private flag) for ports with Port Disable bit set.
2009 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
2010 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
2014 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
2015 * @pi: port info structure
2017 * If default override is enabled, initialize the user PHY cfg speed and FEC
2018 * settings using the default override mask from the NVM.
2020 * The PHY should only be configured with the default override settings the
2021 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2022 * is used to indicate that the user PHY cfg default override is initialized
2023 * and the PHY has not been configured with the default override settings. The
2024 * state is set here, and cleared in ice_configure_phy the first time the PHY is
2027 * This function should be called only if the FW doesn't support default
2028 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
2030 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2032 struct ice_link_default_override_tlv *ldo;
2033 struct ice_aqc_set_phy_cfg_data *cfg;
2034 struct ice_phy_info *phy = &pi->phy;
2035 struct ice_pf *pf = pi->hw->back;
2037 ldo = &pf->link_dflt_override;
2039 /* If link default override is enabled, use to mask NVM PHY capabilities
2040 * for speed and FEC default configuration.
2042 cfg = &phy->curr_user_phy_cfg;
2044 if (ldo->phy_type_low || ldo->phy_type_high) {
2045 cfg->phy_type_low = pf->nvm_phy_type_lo &
2046 cpu_to_le64(ldo->phy_type_low);
2047 cfg->phy_type_high = pf->nvm_phy_type_hi &
2048 cpu_to_le64(ldo->phy_type_high);
2050 cfg->link_fec_opt = ldo->fec_options;
2051 phy->curr_user_fec_req = ICE_FEC_AUTO;
2053 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2057 * ice_init_phy_user_cfg - Initialize the PHY user configuration
2058 * @pi: port info structure
2060 * Initialize the current user PHY configuration, speed, FEC, and FC requested
2061 * mode to default. The PHY defaults are from get PHY capabilities topology
2062 * with media so call when media is first available. An error is returned if
2063 * called when media is not available. The PHY initialization completed state is
2066 * These configurations are used when setting PHY
2067 * configuration. The user PHY configuration is updated on set PHY
2068 * configuration. Returns 0 on success, negative on failure
2070 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2072 struct ice_aqc_get_phy_caps_data *pcaps;
2073 struct ice_phy_info *phy = &pi->phy;
2074 struct ice_pf *pf = pi->hw->back;
2077 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2080 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2084 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2085 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2088 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2091 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2095 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2097 /* check if lenient mode is supported and enabled */
2098 if (ice_fw_supports_link_override(pi->hw) &&
2099 !(pcaps->module_compliance_enforcement &
2100 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2101 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2103 /* if the FW supports default PHY configuration mode, then the driver
2104 * does not have to apply link override settings. If not,
2105 * initialize user PHY configuration with link override values
2107 if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2108 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2109 ice_init_phy_cfg_dflt_override(pi);
2114 /* if link default override is not enabled, set user flow control and
2115 * FEC settings based on what get_phy_caps returned
2117 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2118 pcaps->link_fec_options);
2119 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2122 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2123 set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2130 * ice_configure_phy - configure PHY
2133 * Set the PHY configuration. If the current PHY configuration is the same as
2134 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
2135 * configure the PHY based on get PHY capabilities for topology with media.
2137 static int ice_configure_phy(struct ice_vsi *vsi)
2139 struct device *dev = ice_pf_to_dev(vsi->back);
2140 struct ice_port_info *pi = vsi->port_info;
2141 struct ice_aqc_get_phy_caps_data *pcaps;
2142 struct ice_aqc_set_phy_cfg_data *cfg;
2143 struct ice_phy_info *phy = &pi->phy;
2144 struct ice_pf *pf = vsi->back;
2147 /* Ensure we have media as we cannot configure a medialess port */
2148 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2151 ice_print_topo_conflict(vsi);
2153 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2154 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2157 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2158 return ice_force_phys_link_state(vsi, true);
2160 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2164 /* Get current PHY config */
2165 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2168 dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2173 /* If PHY enable link is configured and configuration has not changed,
2174 * there's nothing to do
2176 if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2177 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2180 /* Use PHY topology as baseline for configuration */
2181 memset(pcaps, 0, sizeof(*pcaps));
2182 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2183 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2186 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2189 dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2194 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2200 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2202 /* Speed - If default override pending, use curr_user_phy_cfg set in
2203 * ice_init_phy_cfg_dflt_override.
2205 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2206 vsi->back->state)) {
2207 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2208 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2210 u64 phy_low = 0, phy_high = 0;
2212 ice_update_phy_type(&phy_low, &phy_high,
2213 pi->phy.curr_user_speed_req);
2214 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2215 cfg->phy_type_high = pcaps->phy_type_high &
2216 cpu_to_le64(phy_high);
2219 /* Can't provide what was requested; use PHY capabilities */
2220 if (!cfg->phy_type_low && !cfg->phy_type_high) {
2221 cfg->phy_type_low = pcaps->phy_type_low;
2222 cfg->phy_type_high = pcaps->phy_type_high;
2226 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2228 /* Can't provide what was requested; use PHY capabilities */
2229 if (cfg->link_fec_opt !=
2230 (cfg->link_fec_opt & pcaps->link_fec_options)) {
2231 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2232 cfg->link_fec_opt = pcaps->link_fec_options;
2235 /* Flow Control - always supported; no need to check against capabilities
2238 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2240 /* Enable link and link update */
2241 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2243 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2245 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2255 * ice_check_media_subtask - Check for media
2256 * @pf: pointer to PF struct
2258 * If media is available, initialize the PHY user configuration if it has not
2259 * been initialized yet, and configure the PHY if the interface is up.
2261 static void ice_check_media_subtask(struct ice_pf *pf)
2263 struct ice_port_info *pi;
2264 struct ice_vsi *vsi;
2267 /* No need to check for media if it's already present */
2268 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2271 vsi = ice_get_main_vsi(pf);
2275 /* Refresh link info and check if media is present */
2276 pi = vsi->port_info;
2277 err = ice_update_link_info(pi);
2281 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2283 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2284 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2285 ice_init_phy_user_cfg(pi);
2287 /* PHY settings are reset on media insertion, reconfigure
2288 * PHY to preserve settings.
2290 if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2291 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2294 err = ice_configure_phy(vsi);
2296 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2298 /* A Link Status Event will be generated; the event handler
2299 * will complete bringing the interface up
2305 * ice_service_task - manage and run subtasks
2306 * @work: pointer to work_struct contained by the PF struct
2308 static void ice_service_task(struct work_struct *work)
2310 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2311 unsigned long start_time = jiffies;
2315 /* process reset requests first */
2316 ice_reset_subtask(pf);
2318 /* bail if a reset/recovery cycle is pending or rebuild failed */
2319 if (ice_is_reset_in_progress(pf->state) ||
2320 test_bit(ICE_SUSPENDED, pf->state) ||
2321 test_bit(ICE_NEEDS_RESTART, pf->state)) {
2322 ice_service_task_complete(pf);
2326 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2327 struct iidc_event *event;
2329 event = kzalloc(sizeof(*event), GFP_KERNEL);
2331 set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2332 /* report the entire OICR value to AUX driver */
2333 swap(event->reg, pf->oicr_err_reg);
2334 ice_send_event_to_aux(pf, event);
2339 /* Unplug the aux device per request; if an unplug request came in
2340 * while a plug request was being processed, this handles it
2342 if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2343 ice_unplug_aux_dev(pf);
2345 /* Plug aux device per request */
2346 if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2347 ice_plug_aux_dev(pf);
2349 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2350 struct iidc_event *event;
2352 event = kzalloc(sizeof(*event), GFP_KERNEL);
2354 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2355 ice_send_event_to_aux(pf, event);
2360 ice_clean_adminq_subtask(pf);
2361 ice_check_media_subtask(pf);
2362 ice_check_for_hang_subtask(pf);
2363 ice_sync_fltr_subtask(pf);
2364 ice_handle_mdd_event(pf);
2365 ice_watchdog_subtask(pf);
2367 if (ice_is_safe_mode(pf)) {
2368 ice_service_task_complete(pf);
2372 ice_process_vflr_event(pf);
2373 ice_clean_mailboxq_subtask(pf);
2374 ice_clean_sbq_subtask(pf);
2375 ice_sync_arfs_fltrs(pf);
2376 ice_flush_fdir_ctx(pf);
2378 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2379 ice_service_task_complete(pf);
2381 /* If the tasks have taken longer than one service timer period
2382 * or there is more work to be done, reset the service timer to
2383 * schedule the service task now.
2385 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2386 test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2387 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2388 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2389 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2390 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2391 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2392 mod_timer(&pf->serv_tmr, jiffies);
2396 * ice_set_ctrlq_len - helper function to set controlq length
2397 * @hw: pointer to the HW instance
2399 static void ice_set_ctrlq_len(struct ice_hw *hw)
2401 hw->adminq.num_rq_entries = ICE_AQ_LEN;
2402 hw->adminq.num_sq_entries = ICE_AQ_LEN;
2403 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2404 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2405 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2406 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2407 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2408 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2409 hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2410 hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2411 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2412 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2416 * ice_schedule_reset - schedule a reset
2417 * @pf: board private structure
2418 * @reset: reset being requested
2420 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2422 struct device *dev = ice_pf_to_dev(pf);
2424 /* bail out if earlier reset has failed */
2425 if (test_bit(ICE_RESET_FAILED, pf->state)) {
2426 dev_dbg(dev, "earlier reset has failed\n");
2429 /* bail if reset/recovery already in progress */
2430 if (ice_is_reset_in_progress(pf->state)) {
2431 dev_dbg(dev, "Reset already in progress\n");
2437 set_bit(ICE_PFR_REQ, pf->state);
2439 case ICE_RESET_CORER:
2440 set_bit(ICE_CORER_REQ, pf->state);
2442 case ICE_RESET_GLOBR:
2443 set_bit(ICE_GLOBR_REQ, pf->state);
2449 ice_service_task_schedule(pf);
2454 * ice_irq_affinity_notify - Callback for affinity changes
2455 * @notify: context as to what irq was changed
2456 * @mask: the new affinity mask
2458 * This is a callback function used by the irq_set_affinity_notifier function
2459 * so that we may register to receive changes to the irq affinity masks.
2462 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2463 const cpumask_t *mask)
2465 struct ice_q_vector *q_vector =
2466 container_of(notify, struct ice_q_vector, affinity_notify);
2468 cpumask_copy(&q_vector->affinity_mask, mask);
2472 * ice_irq_affinity_release - Callback for affinity notifier release
2473 * @ref: internal core kernel usage
2475 * This is a callback function used by the irq_set_affinity_notifier function
2476 * to inform the current notification subscriber that they will no longer
2477 * receive notifications.
2479 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2482 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2483 * @vsi: the VSI being configured
2485 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2487 struct ice_hw *hw = &vsi->back->hw;
2490 ice_for_each_q_vector(vsi, i)
2491 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2498 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2499 * @vsi: the VSI being configured
2500 * @basename: name for the vector
2502 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2504 int q_vectors = vsi->num_q_vectors;
2505 struct ice_pf *pf = vsi->back;
2512 dev = ice_pf_to_dev(pf);
2513 for (vector = 0; vector < q_vectors; vector++) {
2514 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2516 irq_num = q_vector->irq.virq;
2518 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2519 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2520 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2522 } else if (q_vector->rx.rx_ring) {
2523 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2524 "%s-%s-%d", basename, "rx", rx_int_idx++);
2525 } else if (q_vector->tx.tx_ring) {
2526 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2527 "%s-%s-%d", basename, "tx", tx_int_idx++);
2529 /* skip this unused q_vector */
2532 if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2533 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2534 IRQF_SHARED, q_vector->name,
2537 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2538 0, q_vector->name, q_vector);
2540 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2545 /* register for affinity change notifications */
2546 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2547 struct irq_affinity_notify *affinity_notify;
2549 affinity_notify = &q_vector->affinity_notify;
2550 affinity_notify->notify = ice_irq_affinity_notify;
2551 affinity_notify->release = ice_irq_affinity_release;
2552 irq_set_affinity_notifier(irq_num, affinity_notify);
2555 /* assign the mask for this irq */
2556 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2559 err = ice_set_cpu_rx_rmap(vsi);
2561 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2562 vsi->vsi_num, ERR_PTR(err));
2566 vsi->irqs_ready = true;
2571 irq_num = vsi->q_vectors[vector]->irq.virq;
2572 if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2573 irq_set_affinity_notifier(irq_num, NULL);
2574 irq_set_affinity_hint(irq_num, NULL);
2575 devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
2581 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2582 * @vsi: VSI to setup Tx rings used by XDP
2584 * Return 0 on success and negative value on error
2586 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2588 struct device *dev = ice_pf_to_dev(vsi->back);
2589 struct ice_tx_desc *tx_desc;
2592 ice_for_each_xdp_txq(vsi, i) {
2593 u16 xdp_q_idx = vsi->alloc_txq + i;
2594 struct ice_ring_stats *ring_stats;
2595 struct ice_tx_ring *xdp_ring;
2597 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2599 goto free_xdp_rings;
2601 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2603 ice_free_tx_ring(xdp_ring);
2604 goto free_xdp_rings;
2607 xdp_ring->ring_stats = ring_stats;
2608 xdp_ring->q_index = xdp_q_idx;
2609 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2610 xdp_ring->vsi = vsi;
2611 xdp_ring->netdev = NULL;
2612 xdp_ring->dev = dev;
2613 xdp_ring->count = vsi->num_tx_desc;
2614 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2615 if (ice_setup_tx_ring(xdp_ring))
2616 goto free_xdp_rings;
2617 ice_set_ring_xdp(xdp_ring);
2618 spin_lock_init(&xdp_ring->tx_lock);
2619 for (j = 0; j < xdp_ring->count; j++) {
2620 tx_desc = ICE_TX_DESC(xdp_ring, j);
2621 tx_desc->cmd_type_offset_bsz = 0;
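/* Assumption based on the usual Tx descriptor layout, noted here for
 * clarity: zeroing cmd_type_offset_bsz leaves each descriptor in a
 * clean state, so the cleanup path does not mistake stale writeback
 * bits (e.g. a leftover done indication) for completed work.
 */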
2628 for (; i >= 0; i--) {
2629 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2630 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2631 vsi->xdp_rings[i]->ring_stats = NULL;
2632 ice_free_tx_ring(vsi->xdp_rings[i]);
2639 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2640 * @vsi: VSI to set the bpf prog on
2641 * @prog: the bpf prog pointer
2643 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2645 struct bpf_prog *old_prog;
2648 old_prog = xchg(&vsi->xdp_prog, prog);
2649 ice_for_each_rxq(vsi, i)
2650 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2653 bpf_prog_put(old_prog);
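/* xchg() atomically publishes the new program on the VSI and hands
 * back the previous one, while WRITE_ONCE() updates each Rx ring's
 * copy so the hot path always reads a consistent pointer. Putting the
 * returned old_prog releases the reference the VSI held on it without
 * touching the program that was just installed.
 */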
2657 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2658 * @vsi: VSI to bring up Tx rings used by XDP
2659 * @prog: bpf program that will be assigned to VSI
2661 * Return 0 on success and negative value on error
2663 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2665 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2666 int xdp_rings_rem = vsi->num_xdp_txq;
2667 struct ice_pf *pf = vsi->back;
2668 struct ice_qs_cfg xdp_qs_cfg = {
2669 .qs_mutex = &pf->avail_q_mutex,
2670 .pf_map = pf->avail_txqs,
2671 .pf_map_size = pf->max_pf_txqs,
2672 .q_count = vsi->num_xdp_txq,
2673 .scatter_count = ICE_MAX_SCATTER_TXQS,
2674 .vsi_map = vsi->txq_map,
2675 .vsi_map_offset = vsi->alloc_txq,
2676 .mapping_mode = ICE_VSI_MAP_CONTIG
2682 dev = ice_pf_to_dev(pf);
2683 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2684 sizeof(*vsi->xdp_rings), GFP_KERNEL);
2685 if (!vsi->xdp_rings)
2688 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2689 if (__ice_vsi_get_qs(&xdp_qs_cfg))
2692 if (static_key_enabled(&ice_xdp_locking_key))
2693 netdev_warn(vsi->netdev,
2694 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2696 if (ice_xdp_alloc_setup_rings(vsi))
2697 goto clear_xdp_rings;
2699 /* follow the logic from ice_vsi_map_rings_to_vectors */
2700 ice_for_each_q_vector(vsi, v_idx) {
2701 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2702 int xdp_rings_per_v, q_id, q_base;
2704 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2705 vsi->num_q_vectors - v_idx);
2706 q_base = vsi->num_xdp_txq - xdp_rings_rem;
2708 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2709 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2711 xdp_ring->q_vector = q_vector;
2712 xdp_ring->next = q_vector->tx.tx_ring;
2713 q_vector->tx.tx_ring = xdp_ring;
2715 xdp_rings_rem -= xdp_rings_per_v;
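/* Worked example (illustrative only): with 8 XDP rings and 3
 * q_vectors, the loop above assigns DIV_ROUND_UP(8, 3) = 3 rings to
 * vector 0, DIV_ROUND_UP(5, 2) = 3 rings to vector 1 and the
 * remaining 2 rings to vector 2, spreading the rings as evenly as
 * possible.
 */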
2718 ice_for_each_rxq(vsi, i) {
2719 if (static_key_enabled(&ice_xdp_locking_key)) {
2720 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2722 struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
2723 struct ice_tx_ring *ring;
2725 ice_for_each_tx_ring(ring, q_vector->tx) {
2726 if (ice_ring_is_xdp(ring)) {
2727 vsi->rx_rings[i]->xdp_ring = ring;
2732 ice_tx_xsk_pool(vsi, i);
2735 /* omit the scheduler update if in reset path; XDP queues will be
2736 * taken into account at the end of ice_vsi_rebuild, where
2737 * ice_cfg_vsi_lan is being called
2739 if (ice_is_reset_in_progress(pf->state))
2742 /* tell the Tx scheduler that right now we have more queues
2745 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2746 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2748 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2751 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2753 goto clear_xdp_rings;
2756 /* assign the prog only when it's not already present on VSI;
2757 * this flow is subject to both the ethtool -L and ndo_bpf flows;
2758 * a VSI rebuild that happens under ethtool -L can expose us to
2759 * bpf_prog refcount issues, as we would be swapping the same
2760 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2761 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2762 * this is not harmful as dev_xdp_install bumps the refcount
2763 * before calling the op exposed by the driver;
2765 if (!ice_is_xdp_ena_vsi(vsi))
2766 ice_vsi_assign_bpf_prog(vsi, prog);
2770 ice_for_each_xdp_txq(vsi, i)
2771 if (vsi->xdp_rings[i]) {
2772 kfree_rcu(vsi->xdp_rings[i], rcu);
2773 vsi->xdp_rings[i] = NULL;
2777 mutex_lock(&pf->avail_q_mutex);
2778 ice_for_each_xdp_txq(vsi, i) {
2779 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2780 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2782 mutex_unlock(&pf->avail_q_mutex);
2784 devm_kfree(dev, vsi->xdp_rings);
2789 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2790 * @vsi: VSI to remove XDP rings
2792 * Detach XDP rings from irq vectors, clean up the PF bitmap and free resources
2795 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2797 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2798 struct ice_pf *pf = vsi->back;
2801 /* q_vectors are freed in the reset path so there's no point in detaching
2802 * rings; if the rebuild is triggered by something other than a reset, the bits
2803 * in pf->state won't be set, so additionally check the first q_vector against NULL
2806 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2809 ice_for_each_q_vector(vsi, v_idx) {
2810 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2811 struct ice_tx_ring *ring;
2813 ice_for_each_tx_ring(ring, q_vector->tx)
2814 if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2817 /* restore the value of last node prior to XDP setup */
2818 q_vector->tx.tx_ring = ring;
2822 mutex_lock(&pf->avail_q_mutex);
2823 ice_for_each_xdp_txq(vsi, i) {
2824 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2825 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2827 mutex_unlock(&pf->avail_q_mutex);
2829 ice_for_each_xdp_txq(vsi, i)
2830 if (vsi->xdp_rings[i]) {
2831 if (vsi->xdp_rings[i]->desc) {
2833 ice_free_tx_ring(vsi->xdp_rings[i]);
2835 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2836 vsi->xdp_rings[i]->ring_stats = NULL;
2837 kfree_rcu(vsi->xdp_rings[i], rcu);
2838 vsi->xdp_rings[i] = NULL;
2841 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2842 vsi->xdp_rings = NULL;
2844 if (static_key_enabled(&ice_xdp_locking_key))
2845 static_branch_dec(&ice_xdp_locking_key);
2847 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2850 ice_vsi_assign_bpf_prog(vsi, NULL);
2852 /* notify Tx scheduler that we destroyed XDP queues and bring
2853 * back the old number of child nodes
2855 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2856 max_txqs[i] = vsi->num_txq;
2858 /* change number of XDP Tx queues to 0 */
2859 vsi->num_xdp_txq = 0;
2861 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2866 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2867 * @vsi: VSI to schedule napi on
2869 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2873 ice_for_each_rxq(vsi, i) {
2874 struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2876 if (rx_ring->xsk_pool)
2877 napi_schedule(&rx_ring->q_vector->napi);
2882 * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
2883 * @vsi: VSI to determine the count of XDP Tx qs
2885 * Returns 0 if the available Tx queue count is at least half of the CPU count, -ENOMEM otherwise
2888 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2890 u16 avail = ice_get_avail_txq_count(vsi->back);
2891 u16 cpus = num_possible_cpus();
2893 if (avail < cpus / 2)
2896 vsi->num_xdp_txq = min_t(u16, avail, cpus);
2898 if (vsi->num_xdp_txq < cpus)
2899 static_branch_inc(&ice_xdp_locking_key);
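/* Worked example (illustrative only): on a 16-CPU system with 10
 * available Tx queues, 10 >= 16 / 2 so the check above passes and
 * num_xdp_txq becomes min(10, 16) = 10; since 10 < 16, the static key
 * is enabled and XDP Tx rings are shared between CPUs under tx_lock.
 */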
2905 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2906 * @vsi: Pointer to VSI structure
2908 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
2910 if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
2911 return ICE_RXBUF_1664;
2913 return ICE_RXBUF_3072;
2917 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2918 * @vsi: VSI to setup XDP for
2919 * @prog: XDP program
2920 * @extack: netlink extended ack
2923 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2924 struct netlink_ext_ack *extack)
2926 unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2927 bool if_running = netif_running(vsi->netdev);
2928 int ret = 0, xdp_ring_err = 0;
2930 if (prog && !prog->aux->xdp_has_frags) {
2931 if (frame_size > ice_max_xdp_frame_size(vsi)) {
2932 NL_SET_ERR_MSG_MOD(extack,
2933 "MTU is too large for linear frames and XDP prog does not support frags");
2938 /* hot swap progs and avoid toggling link */
2939 if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
2940 ice_vsi_assign_bpf_prog(vsi, prog);
2944 /* need to stop netdev while setting up the program for Rx rings */
2945 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2946 ret = ice_down(vsi);
2948 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2953 if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2954 xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
2956 NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
2958 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2960 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2962 xdp_features_set_redirect_target(vsi->netdev, true);
2963 /* reallocate Rx queues that are used for zero-copy */
2964 xdp_ring_err = ice_realloc_zc_buf(vsi, true);
2966 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
2967 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2968 xdp_features_clear_redirect_target(vsi->netdev);
2969 xdp_ring_err = ice_destroy_xdp_rings(vsi);
2971 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2972 /* reallocate Rx queues that were used for zero-copy */
2973 xdp_ring_err = ice_realloc_zc_buf(vsi, false);
2975 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
2982 ice_vsi_rx_napi_schedule(vsi);
2984 return (ret || xdp_ring_err) ? -ENOMEM : 0;
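/* The distinct failures above are collapsed into a single -ENOMEM
 * here; the extack messages set along the way carry the more specific
 * cause back to user space.
 */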
2988 * ice_xdp_safe_mode - XDP handler for safe mode
2992 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2993 struct netdev_bpf *xdp)
2995 NL_SET_ERR_MSG_MOD(xdp->extack,
2996 "Please provide working DDP firmware package in order to use XDP\n"
2997 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
3002 * ice_xdp - implements XDP handler
3006 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3008 struct ice_netdev_priv *np = netdev_priv(dev);
3009 struct ice_vsi *vsi = np->vsi;
3011 if (vsi->type != ICE_VSI_PF) {
3012 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
3016 switch (xdp->command) {
3017 case XDP_SETUP_PROG:
3018 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3019 case XDP_SETUP_XSK_POOL:
3020 return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
3028 * ice_ena_misc_vector - enable the non-queue interrupts
3029 * @pf: board private structure
3031 static void ice_ena_misc_vector(struct ice_pf *pf)
3033 struct ice_hw *hw = &pf->hw;
3036 /* Disable anti-spoof detection interrupt to prevent spurious event
3037 * interrupts during a function reset. Anti-spoof functionality is
3040 val = rd32(hw, GL_MDCK_TX_TDPU);
3041 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3042 wr32(hw, GL_MDCK_TX_TDPU, val);
3044 /* clear things first */
3045 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
3046 rd32(hw, PFINT_OICR); /* read to clear */
3048 val = (PFINT_OICR_ECC_ERR_M |
3049 PFINT_OICR_MAL_DETECT_M |
3051 PFINT_OICR_PCI_EXCEPTION_M |
3053 PFINT_OICR_HMC_ERR_M |
3054 PFINT_OICR_PE_PUSH_M |
3055 PFINT_OICR_PE_CRITERR_M);
3057 wr32(hw, PFINT_OICR_ENA, val);
3059 /* SW_ITR_IDX = 0, but don't change INTENA */
3060 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
3061 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3065 * ice_misc_intr - misc interrupt handler
3066 * @irq: interrupt number
3067 * @data: pointer to a q_vector
3069 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3071 struct ice_pf *pf = (struct ice_pf *)data;
3072 struct ice_hw *hw = &pf->hw;
3076 dev = ice_pf_to_dev(pf);
3077 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3078 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3079 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3081 oicr = rd32(hw, PFINT_OICR);
3082 ena_mask = rd32(hw, PFINT_OICR_ENA);
3084 if (oicr & PFINT_OICR_SWINT_M) {
3085 ena_mask &= ~PFINT_OICR_SWINT_M;
3089 if (oicr & PFINT_OICR_MAL_DETECT_M) {
3090 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3091 set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3093 if (oicr & PFINT_OICR_VFLR_M) {
3094 /* disable any further VFLR event notifications */
3095 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3096 u32 reg = rd32(hw, PFINT_OICR_ENA);
3098 reg &= ~PFINT_OICR_VFLR_M;
3099 wr32(hw, PFINT_OICR_ENA, reg);
3101 ena_mask &= ~PFINT_OICR_VFLR_M;
3102 set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3106 if (oicr & PFINT_OICR_GRST_M) {
3109 /* we have a reset warning */
3110 ena_mask &= ~PFINT_OICR_GRST_M;
3111 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
3112 GLGEN_RSTAT_RESET_TYPE_S;
3114 if (reset == ICE_RESET_CORER)
3116 else if (reset == ICE_RESET_GLOBR)
3118 else if (reset == ICE_RESET_EMPR)
3121 dev_dbg(dev, "Invalid reset type %d\n", reset);
3123 /* If a reset cycle isn't already in progress, we set a bit in
3124 * pf->state so that the service task can start a reset/rebuild.
3126 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3127 if (reset == ICE_RESET_CORER)
3128 set_bit(ICE_CORER_RECV, pf->state);
3129 else if (reset == ICE_RESET_GLOBR)
3130 set_bit(ICE_GLOBR_RECV, pf->state);
3132 set_bit(ICE_EMPR_RECV, pf->state);
3134 /* There are a couple of different bits at play here.
3135 * hw->reset_ongoing indicates whether the hardware is
3136 * in reset. This is set to true when a reset interrupt
3137 * is received and set back to false after the driver
3138 * has determined that the hardware is out of reset.
3140 * ICE_RESET_OICR_RECV in pf->state indicates
3141 * that a post reset rebuild is required before the
3142 * driver is operational again. This is set above.
3144 * As this is the start of the reset/rebuild cycle, set
3145 * both to indicate that.
3147 hw->reset_ongoing = true;
3151 if (oicr & PFINT_OICR_TSYN_TX_M) {
3152 ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3153 if (!hw->reset_ongoing && ice_ptp_pf_handles_tx_interrupt(pf))
3154 set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
3157 if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3158 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3159 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3161 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3163 if (ice_pf_src_tmr_owned(pf)) {
3164 /* Save EVENTs from GLTSYN register */
3165 pf->ptp.ext_ts_irq |= gltsyn_stat &
3166 (GLTSYN_STAT_EVENT0_M |
3167 GLTSYN_STAT_EVENT1_M |
3168 GLTSYN_STAT_EVENT2_M);
3170 set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread);
3174 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3175 if (oicr & ICE_AUX_CRIT_ERR) {
3176 pf->oicr_err_reg |= oicr;
3177 set_bit(ICE_AUX_ERR_PENDING, pf->state);
3178 ena_mask &= ~ICE_AUX_CRIT_ERR;
3181 /* Report any remaining unexpected interrupts */
3184 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3185 /* If a critical error is pending there is no choice but to reset
3188 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3189 PFINT_OICR_ECC_ERR_M)) {
3190 set_bit(ICE_PFR_REQ, pf->state);
3194 return IRQ_WAKE_THREAD;
3198 * ice_misc_intr_thread_fn - misc interrupt thread function
3199 * @irq: interrupt number
3200 * @data: pointer to a q_vector
3202 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3204 struct ice_pf *pf = data;
3209 if (ice_is_reset_in_progress(pf->state))
3212 ice_service_task_schedule(pf);
3214 if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread))
3215 ice_ptp_extts_event(pf);
3217 if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
3218 /* Process outstanding Tx timestamps. If there is more work,
3219 * re-arm the interrupt to trigger again.
3221 if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
3222 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
3227 ice_irq_dynamic_ena(hw, NULL, NULL);
3233 * ice_dis_ctrlq_interrupts - disable control queue interrupts
3234 * @hw: pointer to HW structure
3236 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3238 /* disable Admin queue Interrupt causes */
3239 wr32(hw, PFINT_FW_CTL,
3240 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3242 /* disable Mailbox queue Interrupt causes */
3243 wr32(hw, PFINT_MBX_CTL,
3244 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3246 wr32(hw, PFINT_SB_CTL,
3247 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3249 /* disable Control queue Interrupt causes */
3250 wr32(hw, PFINT_OICR_CTL,
3251 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3257 * ice_free_irq_msix_misc - Unroll misc vector setup
3258 * @pf: board private structure
3260 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3262 int misc_irq_num = pf->oicr_irq.virq;
3263 struct ice_hw *hw = &pf->hw;
3265 ice_dis_ctrlq_interrupts(hw);
3267 /* disable OICR interrupt */
3268 wr32(hw, PFINT_OICR_ENA, 0);
3271 synchronize_irq(misc_irq_num);
3272 devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
3274 ice_free_irq(pf, pf->oicr_irq);
3278 * ice_ena_ctrlq_interrupts - enable control queue interrupts
3279 * @hw: pointer to HW structure
3280 * @reg_idx: HW vector index to associate the control queue interrupts with
3282 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3286 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3287 PFINT_OICR_CTL_CAUSE_ENA_M);
3288 wr32(hw, PFINT_OICR_CTL, val);
3290 /* enable Admin queue Interrupt causes */
3291 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3292 PFINT_FW_CTL_CAUSE_ENA_M);
3293 wr32(hw, PFINT_FW_CTL, val);
3295 /* enable Mailbox queue Interrupt causes */
3296 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3297 PFINT_MBX_CTL_CAUSE_ENA_M);
3298 wr32(hw, PFINT_MBX_CTL, val);
3300 /* enable Sideband queue Interrupt causes */
3301 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3302 PFINT_SB_CTL_CAUSE_ENA_M);
3303 wr32(hw, PFINT_SB_CTL, val);
3309 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3310 * @pf: board private structure
3312 * This sets up the handler for MSIX 0, which is used to manage the
3313 * non-queue interrupts, e.g. AdminQ and errors. This is not used
3314 * when in MSI or Legacy interrupt mode.
3316 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3318 struct device *dev = ice_pf_to_dev(pf);
3319 struct ice_hw *hw = &pf->hw;
3320 struct msi_map oicr_irq;
3323 if (!pf->int_name[0])
3324 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3325 dev_driver_string(dev), dev_name(dev));
3327 /* Do not request IRQ but do enable OICR interrupt since settings are
3328 * lost during reset. Note that this function is called only during
3329 * rebuild path and not while reset is in progress.
3331 if (ice_is_reset_in_progress(pf->state))
3334 /* reserve one vector in irq_tracker for misc interrupts */
3335 oicr_irq = ice_alloc_irq(pf, false);
3336 if (oicr_irq.index < 0)
3337 return oicr_irq.index;
3339 pf->oicr_irq = oicr_irq;
3340 err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
3341 ice_misc_intr_thread_fn, 0,
3344 dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3346 ice_free_irq(pf, pf->oicr_irq);
3351 ice_ena_misc_vector(pf);
3353 ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
3354 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
3355 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3358 ice_irq_dynamic_ena(hw, NULL, NULL);
3364 * ice_napi_add - register NAPI handler for the VSI
3365 * @vsi: VSI for which NAPI handler is to be registered
3367 * This function is only called in the driver's load path. Registering the NAPI
3368 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3369 * reset/rebuild, etc.)
3371 static void ice_napi_add(struct ice_vsi *vsi)
3378 ice_for_each_q_vector(vsi, v_idx)
3379 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3384 * ice_set_ops - set netdev and ethtool ops for the given netdev
3385 * @vsi: the VSI associated with the new netdev
3387 static void ice_set_ops(struct ice_vsi *vsi)
3389 struct net_device *netdev = vsi->netdev;
3390 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3392 if (ice_is_safe_mode(pf)) {
3393 netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3394 ice_set_ethtool_safe_mode_ops(netdev);
3398 netdev->netdev_ops = &ice_netdev_ops;
3399 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3400 ice_set_ethtool_ops(netdev);
3402 if (vsi->type != ICE_VSI_PF)
3405 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
3406 NETDEV_XDP_ACT_XSK_ZEROCOPY |
3407 NETDEV_XDP_ACT_RX_SG;
3408 netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
3412 * ice_set_netdev_features - set features for the given netdev
3413 * @netdev: netdev instance
3415 static void ice_set_netdev_features(struct net_device *netdev)
3417 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3418 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3419 netdev_features_t csumo_features;
3420 netdev_features_t vlano_features;
3421 netdev_features_t dflt_features;
3422 netdev_features_t tso_features;
3424 if (ice_is_safe_mode(pf)) {
3426 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3427 netdev->hw_features = netdev->features;
3431 dflt_features = NETIF_F_SG |
3436 csumo_features = NETIF_F_RXCSUM |
3441 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3442 NETIF_F_HW_VLAN_CTAG_TX |
3443 NETIF_F_HW_VLAN_CTAG_RX;
3445 /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3447 vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3449 tso_features = NETIF_F_TSO |
3453 NETIF_F_GSO_UDP_TUNNEL |
3454 NETIF_F_GSO_GRE_CSUM |
3455 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3456 NETIF_F_GSO_PARTIAL |
3457 NETIF_F_GSO_IPXIP4 |
3458 NETIF_F_GSO_IPXIP6 |
3461 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3462 NETIF_F_GSO_GRE_CSUM;
3463 /* set features that user can change */
3464 netdev->hw_features = dflt_features | csumo_features |
3465 vlano_features | tso_features;
3467 /* add support for HW_CSUM on packets with MPLS header */
3468 netdev->mpls_features = NETIF_F_HW_CSUM |
3472 /* enable features */
3473 netdev->features |= netdev->hw_features;
3475 netdev->hw_features |= NETIF_F_HW_TC;
3476 netdev->hw_features |= NETIF_F_LOOPBACK;
3478 /* encap and VLAN devices inherit default, csumo and tso features */
3479 netdev->hw_enc_features |= dflt_features | csumo_features |
3481 netdev->vlan_features |= dflt_features | csumo_features |
3484 /* advertise support but don't enable by default since only one type of
3485 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3486 * type turns on the other has to be turned off. This is enforced by the
3487 * ice_fix_features() ndo callback.
3490 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3491 NETIF_F_HW_VLAN_STAG_TX;
3493 /* Leave CRC / FCS stripping enabled by default, but allow the value to
3494 * be changed at runtime
3496 netdev->hw_features |= NETIF_F_RXFCS;
3498 netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
3502 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3503 * @lut: Lookup table
3504 * @rss_table_size: Lookup table size
3505 * @rss_size: Range of queue number for hashing
3507 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3511 for (i = 0; i < rss_table_size; i++)
3512 lut[i] = i % rss_size;
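/* Worked example (illustrative only): ice_fill_rss_lut(lut, 8, 3)
 * produces lut = { 0, 1, 2, 0, 1, 2, 0, 1 }, i.e. hash buckets are
 * spread round-robin across the first three queues.
 */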
3516 * ice_pf_vsi_setup - Set up a PF VSI
3517 * @pf: board private structure
3518 * @pi: pointer to the port_info instance
3520 * Returns pointer to the successfully allocated VSI software struct
3521 * on success, otherwise returns NULL on failure.
3523 static struct ice_vsi *
3524 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3526 struct ice_vsi_cfg_params params = {};
3528 params.type = ICE_VSI_PF;
3530 params.flags = ICE_VSI_FLAG_INIT;
3532 return ice_vsi_setup(pf, &params);
3535 static struct ice_vsi *
3536 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3537 struct ice_channel *ch)
3539 struct ice_vsi_cfg_params params = {};
3541 params.type = ICE_VSI_CHNL;
3544 params.flags = ICE_VSI_FLAG_INIT;
3546 return ice_vsi_setup(pf, &params);
3550 * ice_ctrl_vsi_setup - Set up a control VSI
3551 * @pf: board private structure
3552 * @pi: pointer to the port_info instance
3554 * Returns pointer to the successfully allocated VSI software struct
3555 * on success, otherwise returns NULL on failure.
3557 static struct ice_vsi *
3558 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3560 struct ice_vsi_cfg_params params = {};
3562 params.type = ICE_VSI_CTRL;
3564 params.flags = ICE_VSI_FLAG_INIT;
3566 return ice_vsi_setup(pf, &params);
3570 * ice_lb_vsi_setup - Set up a loopback VSI
3571 * @pf: board private structure
3572 * @pi: pointer to the port_info instance
3574 * Returns pointer to the successfully allocated VSI software struct
3575 * on success, otherwise returns NULL on failure.
3578 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3580 struct ice_vsi_cfg_params params = {};
3582 params.type = ICE_VSI_LB;
3584 params.flags = ICE_VSI_FLAG_INIT;
3586 return ice_vsi_setup(pf, &params);
3590 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3591 * @netdev: network interface to be adjusted
3593 * @vid: VLAN ID to be added
3595 * net_device_ops implementation for adding VLAN IDs
3598 ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3600 struct ice_netdev_priv *np = netdev_priv(netdev);
3601 struct ice_vsi_vlan_ops *vlan_ops;
3602 struct ice_vsi *vsi = np->vsi;
3603 struct ice_vlan vlan;
3606 /* VLAN 0 is added by default during load/reset */
3610 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3611 usleep_range(1000, 2000);
3613 /* Add multicast promisc rule for the VLAN ID to be added if
3614 * all-multicast is currently enabled.
3616 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3617 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3618 ICE_MCAST_VLAN_PROMISC_BITS,
3624 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3626 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3627 * packets aren't pruned by the device's internal switch on Rx
3629 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3630 ret = vlan_ops->add_vlan(vsi, &vlan);
3634 /* If all-multicast is currently enabled and this VLAN ID is the only one
3635 * besides VLAN-0, we have to update the look-up type of the multicast promisc
3636 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3638 if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3639 ice_vsi_num_non_zero_vlans(vsi) == 1) {
3640 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3641 ICE_MCAST_PROMISC_BITS, 0);
3642 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3643 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3647 clear_bit(ICE_CFG_BUSY, vsi->state);
3653 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3654 * @netdev: network interface to be adjusted
3656 * @vid: VLAN ID to be removed
3658 * net_device_ops implementation for removing VLAN IDs
3661 ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3663 struct ice_netdev_priv *np = netdev_priv(netdev);
3664 struct ice_vsi_vlan_ops *vlan_ops;
3665 struct ice_vsi *vsi = np->vsi;
3666 struct ice_vlan vlan;
3669 /* don't allow removal of VLAN 0 */
3673 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3674 usleep_range(1000, 2000);
3676 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3677 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3679 netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3681 vsi->current_netdev_flags |= IFF_ALLMULTI;
3684 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3686 /* Make sure VLAN delete is successful before updating VLAN information
3689 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3690 ret = vlan_ops->del_vlan(vsi, &vlan);
3694 /* Remove multicast promisc rule for the removed VLAN ID if
3695 * all-multicast is enabled.
3697 if (vsi->current_netdev_flags & IFF_ALLMULTI)
3698 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3699 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3701 if (!ice_vsi_has_non_zero_vlans(vsi)) {
3702 /* Update look-up type of multicast promisc rule for VLAN 0
3703 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3704 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3706 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3707 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3708 ICE_MCAST_VLAN_PROMISC_BITS,
3710 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3711 ICE_MCAST_PROMISC_BITS, 0);
3716 clear_bit(ICE_CFG_BUSY, vsi->state);
3722 * ice_rep_indr_tc_block_unbind
3723 * @cb_priv: indirection block private data
3725 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3727 struct ice_indr_block_priv *indr_priv = cb_priv;
3729 list_del(&indr_priv->list);
3734 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3735 * @vsi: VSI struct which has the netdev
3737 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3739 struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3741 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3742 ice_rep_indr_tc_block_unbind);
3746 * ice_tc_indir_block_register - Register TC indirect block notifications
3747 * @vsi: VSI struct which has the netdev
3749 * Returns 0 on success, negative value on failure
3751 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3753 struct ice_netdev_priv *np;
3755 if (!vsi || !vsi->netdev)
3758 np = netdev_priv(vsi->netdev);
3760 INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3761 return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3765 * ice_get_avail_q_count - Get count of available queues
3766 * @pf_qmap: bitmap to get queue use count from
3767 * @lock: pointer to a mutex that protects access to pf_qmap
3768 * @size: size of the bitmap
3771 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3777 for_each_clear_bit(bit, pf_qmap, size)
3785 * ice_get_avail_txq_count - Get count of available Tx queues
3786 * @pf: pointer to an ice_pf instance
3788 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3790 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3795 * ice_get_avail_rxq_count - Get count of available Rx queues
3796 * @pf: pointer to an ice_pf instance
3798 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3800 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3805 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3806 * @pf: board private structure to initialize
3808 static void ice_deinit_pf(struct ice_pf *pf)
3810 ice_service_task_stop(pf);
3811 mutex_destroy(&pf->lag_mutex);
3812 mutex_destroy(&pf->adev_mutex);
3813 mutex_destroy(&pf->sw_mutex);
3814 mutex_destroy(&pf->tc_mutex);
3815 mutex_destroy(&pf->avail_q_mutex);
3816 mutex_destroy(&pf->vfs.table_lock);
3818 if (pf->avail_txqs) {
3819 bitmap_free(pf->avail_txqs);
3820 pf->avail_txqs = NULL;
3823 if (pf->avail_rxqs) {
3824 bitmap_free(pf->avail_rxqs);
3825 pf->avail_rxqs = NULL;
3829 ptp_clock_unregister(pf->ptp.clock);
3833 * ice_set_pf_caps - set the PF's capability flags
3834 * @pf: pointer to the PF instance
3836 static void ice_set_pf_caps(struct ice_pf *pf)
3838 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3840 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3841 if (func_caps->common_cap.rdma)
3842 set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3843 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3844 if (func_caps->common_cap.dcb)
3845 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3846 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3847 if (func_caps->common_cap.sr_iov_1_1) {
3848 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3849 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
3852 clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3853 if (func_caps->common_cap.rss_table_size)
3854 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3856 clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3857 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3860 /* ctrl_vsi_idx will be set to a valid value when flow director
3861 * is set up by ice_init_fdir
3863 pf->ctrl_vsi_idx = ICE_NO_VSI;
3864 set_bit(ICE_FLAG_FD_ENA, pf->flags);
3865 /* force guaranteed filter pool for PF */
3866 ice_alloc_fd_guar_item(&pf->hw, &unused,
3867 func_caps->fd_fltr_guar);
3868 /* force shared filter pool for PF */
3869 ice_alloc_fd_shrd_item(&pf->hw, &unused,
3870 func_caps->fd_fltr_best_effort);
3873 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3874 if (func_caps->common_cap.ieee_1588 &&
3875 !(pf->hw.mac_type == ICE_MAC_E830))
3876 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3878 pf->max_pf_txqs = func_caps->common_cap.num_txq;
3879 pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3883 * ice_init_pf - Initialize general software structures (struct ice_pf)
3884 * @pf: board private structure to initialize
3886 static int ice_init_pf(struct ice_pf *pf)
3888 ice_set_pf_caps(pf);
3890 mutex_init(&pf->sw_mutex);
3891 mutex_init(&pf->tc_mutex);
3892 mutex_init(&pf->adev_mutex);
3893 mutex_init(&pf->lag_mutex);
3895 INIT_HLIST_HEAD(&pf->aq_wait_list);
3896 spin_lock_init(&pf->aq_wait_lock);
3897 init_waitqueue_head(&pf->aq_wait_queue);
3899 init_waitqueue_head(&pf->reset_wait_queue);
3901 /* setup service timer and periodic service task */
3902 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3903 pf->serv_tmr_period = HZ;
3904 INIT_WORK(&pf->serv_task, ice_service_task);
3905 clear_bit(ICE_SERVICE_SCHED, pf->state);
3907 mutex_init(&pf->avail_q_mutex);
3908 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3909 if (!pf->avail_txqs)
3912 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3913 if (!pf->avail_rxqs) {
3914 bitmap_free(pf->avail_txqs);
3915 pf->avail_txqs = NULL;
3919 mutex_init(&pf->vfs.table_lock);
3920 hash_init(pf->vfs.table);
3921 ice_mbx_init_snapshot(&pf->hw);
3927 * ice_is_wol_supported - check if WoL is supported
3928 * @hw: pointer to hardware info
3930 * Check if WoL is supported based on the HW configuration.
3931 * Returns true if NVM supports and enables WoL for this port, false otherwise
3933 bool ice_is_wol_supported(struct ice_hw *hw)
3937 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3938 * word) indicates WoL is not supported on the corresponding PF ID.
3940 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3943 return !(BIT(hw->port_info->lport) & wol_ctrl);
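/* Worked example (illustrative only): a wol_ctrl word of 0x0005 has
 * bits 0 and 2 set, so WoL is reported as unsupported on LAN ports 0
 * and 2 and as supported on every other port.
 */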
3947 * ice_vsi_recfg_qs - Change the number of queues on a VSI
3948 * @vsi: VSI being changed
3949 * @new_rx: new number of Rx queues
3950 * @new_tx: new number of Tx queues
3951 * @locked: is adev device_lock held
3953 * Only change the number of queues if new_tx or new_rx is non-zero.
3955 * Returns 0 on success.
3957 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
3959 struct ice_pf *pf = vsi->back;
3960 int err = 0, timeout = 50;
3962 if (!new_rx && !new_tx)
3965 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3969 usleep_range(1000, 2000);
3973 vsi->req_txq = (u16)new_tx;
3975 vsi->req_rxq = (u16)new_rx;
3977 /* set for the next time the netdev is started */
3978 if (!netif_running(vsi->netdev)) {
3979 ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
3980 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
3985 ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
3986 ice_pf_dcb_recfg(pf, locked);
3989 clear_bit(ICE_CFG_BUSY, pf->state);
3994 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3995 * @pf: PF to configure
3997 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3998 * VSI can still Tx/Rx VLAN tagged packets.
4000 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4002 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4003 struct ice_vsi_ctx *ctxt;
4010 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4015 ctxt->info = vsi->info;
4017 ctxt->info.valid_sections =
4018 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4019 ICE_AQ_VSI_PROP_SECURITY_VALID |
4020 ICE_AQ_VSI_PROP_SW_VALID);
4022 /* disable VLAN anti-spoof */
4023 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4024 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4026 /* disable VLAN pruning and keep all other settings */
4027 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4029 /* allow all VLANs on Tx and don't strip on Rx */
4030 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4031 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4033 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4035 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4036 status, ice_aq_str(hw->adminq.sq_last_status));
4038 vsi->info.sec_flags = ctxt->info.sec_flags;
4039 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4040 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4047 * ice_log_pkg_init - log result of DDP package load
4048 * @hw: pointer to hardware info
4049 * @state: state of package load
4051 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4053 struct ice_pf *pf = hw->back;
4056 dev = ice_pf_to_dev(pf);
4059 case ICE_DDP_PKG_SUCCESS:
4060 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4061 hw->active_pkg_name,
4062 hw->active_pkg_ver.major,
4063 hw->active_pkg_ver.minor,
4064 hw->active_pkg_ver.update,
4065 hw->active_pkg_ver.draft);
4067 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4068 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4069 hw->active_pkg_name,
4070 hw->active_pkg_ver.major,
4071 hw->active_pkg_ver.minor,
4072 hw->active_pkg_ver.update,
4073 hw->active_pkg_ver.draft);
4075 case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4076 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
4077 hw->active_pkg_name,
4078 hw->active_pkg_ver.major,
4079 hw->active_pkg_ver.minor,
4080 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4082 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4083 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4084 hw->active_pkg_name,
4085 hw->active_pkg_ver.major,
4086 hw->active_pkg_ver.minor,
4087 hw->active_pkg_ver.update,
4088 hw->active_pkg_ver.draft,
4095 case ICE_DDP_PKG_FW_MISMATCH:
4096 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
4098 case ICE_DDP_PKG_INVALID_FILE:
4099 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4101 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4102 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
4104 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4105 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
4106 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4108 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4109 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
4111 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4112 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
4114 case ICE_DDP_PKG_LOAD_ERROR:
4115 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
4116 /* poll for reset to complete */
4117 if (ice_check_reset(hw))
4118 dev_err(dev, "Error resetting device. Please reload the driver\n");
4120 case ICE_DDP_PKG_ERR:
4122 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
4128 * ice_load_pkg - load/reload the DDP Package file
4129 * @firmware: firmware structure when firmware requested or NULL for reload
4130 * @pf: pointer to the PF instance
4132 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4133 * initialize HW tables.
4136 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4138 enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4139 struct device *dev = ice_pf_to_dev(pf);
4140 struct ice_hw *hw = &pf->hw;
4142 /* Load DDP Package */
4143 if (firmware && !hw->pkg_copy) {
4144 state = ice_copy_and_init_pkg(hw, firmware->data,
4146 ice_log_pkg_init(hw, state);
4147 } else if (!firmware && hw->pkg_copy) {
4148 /* Reload package during rebuild after CORER/GLOBR reset */
4149 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4150 ice_log_pkg_init(hw, state);
4152 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4155 if (!ice_is_init_pkg_successful(state)) {
4157 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4161 /* A successfully downloaded package is the precondition for advanced
4162 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4164 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4168 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4169 * @pf: pointer to the PF structure
4171 * There is no error returned here because the driver should be able to handle
4172 * 128 Byte cache lines, so we only print a warning in case issues are seen,
4173 * specifically with Tx.
4175 static void ice_verify_cacheline_size(struct ice_pf *pf)
4177 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4178 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4179 ICE_CACHE_LINE_BYTES);
4183 * ice_send_version - update firmware with driver version
4186 * Returns 0 on success, else error code
4188 static int ice_send_version(struct ice_pf *pf)
4190 struct ice_driver_ver dv;
4192 dv.major_ver = 0xff;
4193 dv.minor_ver = 0xff;
4194 dv.build_ver = 0xff;
4195 dv.subbuild_ver = 0;
4196 strscpy((char *)dv.driver_string, UTS_RELEASE,
4197 sizeof(dv.driver_string));
4198 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4202 * ice_init_fdir - Initialize flow director VSI and configuration
4203 * @pf: pointer to the PF instance
4205 * returns 0 on success, negative on error
4207 static int ice_init_fdir(struct ice_pf *pf)
4209 struct device *dev = ice_pf_to_dev(pf);
4210 struct ice_vsi *ctrl_vsi;
4213 /* Side Band Flow Director needs to have a control VSI.
4214 * Allocate it and store it in the PF.
4216 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4218 dev_dbg(dev, "could not create control VSI\n");
4222 err = ice_vsi_open_ctrl(ctrl_vsi);
4224 dev_dbg(dev, "could not open control VSI\n");
4228 mutex_init(&pf->hw.fdir_fltr_lock);
4230 err = ice_fdir_create_dflt_rules(pf);
4237 ice_fdir_release_flows(&pf->hw);
4238 ice_vsi_close(ctrl_vsi);
4240 ice_vsi_release(ctrl_vsi);
4241 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4242 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4243 pf->ctrl_vsi_idx = ICE_NO_VSI;
4248 static void ice_deinit_fdir(struct ice_pf *pf)
4250 struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4255 ice_vsi_manage_fdir(vsi, false);
4256 ice_vsi_release(vsi);
4257 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4258 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4259 pf->ctrl_vsi_idx = ICE_NO_VSI;
4262 mutex_destroy(&pf->hw.fdir_fltr_lock);
4266 * ice_get_opt_fw_name - return optional firmware file name or NULL
4267 * @pf: pointer to the PF instance
4269 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4271 /* The optional firmware name is the same as the default, with an additional
4272 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
4274 struct pci_dev *pdev = pf->pdev;
4275 char *opt_fw_filename;
4278 /* Determine the name of the optional file using the DSN (two
4279 * dwords following the start of the DSN Capability).
4281 dsn = pci_get_dsn(pdev);
4285 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4286 if (!opt_fw_filename)
4289 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4290 ICE_DDP_PKG_PATH, dsn);
4292 return opt_fw_filename;
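/* Illustrative note (not driver code): with a hypothetical DSN of
 * 0x0001020304050607, the snprintf() above yields
 * "intel/ice/ddp/ice-0001020304050607.pkg", which the firmware loader
 * resolves against the firmware search paths, e.g. under /lib/firmware/.
 */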
4296 * ice_request_fw - Device initialization routine
4297 * @pf: pointer to the PF instance
4299 static void ice_request_fw(struct ice_pf *pf)
4301 char *opt_fw_filename = ice_get_opt_fw_name(pf);
4302 const struct firmware *firmware = NULL;
4303 struct device *dev = ice_pf_to_dev(pf);
4306 /* An optional device-specific DDP package (if present) overrides the
4307 * default DDP package file. The kernel logs a debug message if the file
4308 * doesn't exist, and warning messages for other errors.
4310 if (opt_fw_filename) {
4311 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4313 kfree(opt_fw_filename);
4317 /* request for firmware was successful. Download to device */
4318 ice_load_pkg(firmware, pf);
4319 kfree(opt_fw_filename);
4320 release_firmware(firmware);
4325 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4327 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4331 /* request for firmware was successful. Download to device */
4332 ice_load_pkg(firmware, pf);
4333 release_firmware(firmware);
4337 * ice_print_wake_reason - show the wake up cause in the log
4338 * @pf: pointer to the PF struct
4340 static void ice_print_wake_reason(struct ice_pf *pf)
4342 u32 wus = pf->wakeup_reason;
4343 const char *wake_str;
4345 /* if no wake event, nothing to print */
4349 if (wus & PFPM_WUS_LNKC_M)
4350 wake_str = "Link\n";
4351 else if (wus & PFPM_WUS_MAG_M)
4352 wake_str = "Magic Packet\n";
4353 else if (wus & PFPM_WUS_MNG_M)
4354 wake_str = "Management\n";
4355 else if (wus & PFPM_WUS_FW_RST_WK_M)
4356 wake_str = "Firmware Reset\n";
4358 wake_str = "Unknown\n";
4360 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4364 * ice_register_netdev - register netdev
4365 * @vsi: pointer to the VSI struct
4367 static int ice_register_netdev(struct ice_vsi *vsi)
4371 if (!vsi || !vsi->netdev)
4374 err = register_netdev(vsi->netdev);
4378 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4379 netif_carrier_off(vsi->netdev);
4380 netif_tx_stop_all_queues(vsi->netdev);
4385 static void ice_unregister_netdev(struct ice_vsi *vsi)
4387 if (!vsi || !vsi->netdev)
4390 unregister_netdev(vsi->netdev);
4391 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4395 * ice_cfg_netdev - Allocate, configure and register a netdev
4396 * @vsi: the VSI associated with the new netdev
4398 * Returns 0 on success, negative value on failure
4400 static int ice_cfg_netdev(struct ice_vsi *vsi)
4402 struct ice_netdev_priv *np;
4403 struct net_device *netdev;
4404 u8 mac_addr[ETH_ALEN];
4406 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
4411 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4412 vsi->netdev = netdev;
4413 np = netdev_priv(netdev);
4416 ice_set_netdev_features(netdev);
4419 if (vsi->type == ICE_VSI_PF) {
4420 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
4421 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4422 eth_hw_addr_set(netdev, mac_addr);
4425 netdev->priv_flags |= IFF_UNICAST_FLT;
4427 /* Setup netdev TC information */
4428 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
4430 netdev->max_mtu = ICE_MAX_MTU;
4435 static void ice_decfg_netdev(struct ice_vsi *vsi)
4437 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4438 free_netdev(vsi->netdev);
4442 static int ice_start_eth(struct ice_vsi *vsi)
4446 err = ice_init_mac_fltr(vsi->back);
4450 err = ice_vsi_open(vsi);
4452 ice_fltr_remove_all(vsi);
4457 static void ice_stop_eth(struct ice_vsi *vsi)
4459 ice_fltr_remove_all(vsi);
4463 static int ice_init_eth(struct ice_pf *pf)
4465 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4471 /* init channel list */
4472 INIT_LIST_HEAD(&vsi->ch_list);
4474 err = ice_cfg_netdev(vsi);
4477 /* Setup DCB netlink interface */
4478 ice_dcbnl_setup(vsi);
4480 err = ice_init_mac_fltr(pf);
4482 goto err_init_mac_fltr;
4484 err = ice_devlink_create_pf_port(pf);
4486 goto err_devlink_create_pf_port;
4488 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
4490 err = ice_register_netdev(vsi);
4492 goto err_register_netdev;
4494 err = ice_tc_indir_block_register(vsi);
4496 goto err_tc_indir_block_register;
4502 err_tc_indir_block_register:
4503 ice_unregister_netdev(vsi);
4504 err_register_netdev:
4505 ice_devlink_destroy_pf_port(pf);
4506 err_devlink_create_pf_port:
4508 ice_decfg_netdev(vsi);
4512 static void ice_deinit_eth(struct ice_pf *pf)
4514 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4520 ice_unregister_netdev(vsi);
4521 ice_devlink_destroy_pf_port(pf);
4522 ice_tc_indir_block_unregister(vsi);
4523 ice_decfg_netdev(vsi);
4527 * ice_wait_for_fw - wait for full FW readiness
4528 * @hw: pointer to the hardware structure
4529 * @timeout: milliseconds that can elapse before timing out
4531 static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
4536 while (elapsed <= timeout) {
4537 fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
4539 /* firmware is not yet loaded; we have to keep waiting */
4551 static int ice_init_dev(struct ice_pf *pf)
4553 struct device *dev = ice_pf_to_dev(pf);
4554 struct ice_hw *hw = &pf->hw;
4557 err = ice_init_hw(hw);
4559 dev_err(dev, "ice_init_hw failed: %d\n", err);
4563 /* Some cards require longer initialization times
4564 * due to the need to load FW from an external source.
4565 * This can take as long as half a minute.
4567 if (ice_is_pf_c827(hw)) {
4568 err = ice_wait_for_fw(hw, 30000);
4570 dev_err(dev, "ice_wait_for_fw timed out\n");
4575 ice_init_feature_support(pf);
4579 /* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be
4580 * set in pf->flags, which will cause ice_is_safe_mode to return true
4583 if (ice_is_safe_mode(pf)) {
4584 /* we already got function/device capabilities but these don't
4585 * reflect what the driver needs to do in safe mode. Instead of
4586 * adding conditional logic everywhere to ignore these
4587 * device/function capabilities, override them.
4589 ice_set_safe_mode_caps(hw);
4592 err = ice_init_pf(pf);
4594 dev_err(dev, "ice_init_pf failed: %d\n", err);
4598 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4599 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4600 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4601 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4602 if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4603 pf->hw.udp_tunnel_nic.tables[0].n_entries =
4604 pf->hw.tnl.valid_count[TNL_VXLAN];
4605 pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
4606 UDP_TUNNEL_TYPE_VXLAN;
4608 if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4609 pf->hw.udp_tunnel_nic.tables[1].n_entries =
4610 pf->hw.tnl.valid_count[TNL_GENEVE];
4611 pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
4612 UDP_TUNNEL_TYPE_GENEVE;
4615 err = ice_init_interrupt_scheme(pf);
4617 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4619 goto err_init_interrupt_scheme;
4622 /* In the case of MSI-X we are going to set up the misc vector right here
4623 * to handle admin queue events etc. In the case of legacy and MSI,
4624 * the misc functionality and queue processing are combined in
4625 * the same vector, which gets set up at open.
4627 err = ice_req_irq_msix_misc(pf);
4629 dev_err(dev, "setup of misc vector failed: %d\n", err);
4630 goto err_req_irq_msix_misc;
4635 err_req_irq_msix_misc:
4636 ice_clear_interrupt_scheme(pf);
4637 err_init_interrupt_scheme:
4644 static void ice_deinit_dev(struct ice_pf *pf)
4646 ice_free_irq_msix_misc(pf);
4648 ice_deinit_hw(&pf->hw);
4650 /* Service task is already stopped, so call reset directly. */
4651 ice_reset(&pf->hw, ICE_RESET_PFR);
4652 pci_wait_for_pending_transaction(pf->pdev);
4653 ice_clear_interrupt_scheme(pf);
4656 static void ice_init_features(struct ice_pf *pf)
4658 struct device *dev = ice_pf_to_dev(pf);
4660 if (ice_is_safe_mode(pf))
4663 /* initialize DDP-driven features */
4664 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4667 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4670 if (ice_is_feature_supported(pf, ICE_F_CGU) ||
4671 ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
4674 /* Note: Flow director init failure is non-fatal to load */
4675 if (ice_init_fdir(pf))
4676 dev_err(dev, "could not initialize flow director\n");
4678 /* Note: DCB init failure is non-fatal to load */
4679 if (ice_init_pf_dcb(pf, false)) {
4680 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4681 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4683 ice_cfg_lldp_mib_change(&pf->hw, true);
4686 if (ice_init_lag(pf))
4687 dev_warn(dev, "Failed to init link aggregation support\n");
4690 static void ice_deinit_features(struct ice_pf *pf)
4692 if (ice_is_safe_mode(pf))
4696 if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
4697 ice_cfg_lldp_mib_change(&pf->hw, false);
4698 ice_deinit_fdir(pf);
4699 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4701 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4702 ice_ptp_release(pf);
4703 if (test_bit(ICE_FLAG_DPLL, pf->flags))
4704 ice_dpll_deinit(pf);
4707 static void ice_init_wakeup(struct ice_pf *pf)
4709 /* Save wakeup reason register for later use */
4710 pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
4712 /* check for a power management event */
4713 ice_print_wake_reason(pf);
4715 /* clear wake status, all bits */
4716 wr32(&pf->hw, PFPM_WUS, U32_MAX);
4718 /* Disable WoL at init, wait for user to enable */
4719 device_set_wakeup_enable(ice_pf_to_dev(pf), false);
4722 static int ice_init_link(struct ice_pf *pf)
4724 struct device *dev = ice_pf_to_dev(pf);
4727 err = ice_init_link_events(pf->hw.port_info);
4729 dev_err(dev, "ice_init_link_events failed: %d\n", err);
4733 /* not a fatal error if this fails */
4734 err = ice_init_nvm_phy_type(pf->hw.port_info);
4736 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4738 /* not a fatal error if this fails */
4739 err = ice_update_link_info(pf->hw.port_info);
4741 dev_err(dev, "ice_update_link_info failed: %d\n", err);
4743 ice_init_link_dflt_override(pf->hw.port_info);
4745 ice_check_link_cfg_err(pf,
4746 pf->hw.port_info->phy.link_info.link_cfg_err);
4748 /* if media available, initialize PHY settings */
4749 if (pf->hw.port_info->phy.link_info.link_info &
4750 ICE_AQ_MEDIA_AVAILABLE) {
4751 /* not a fatal error if this fails */
4752 err = ice_init_phy_user_cfg(pf->hw.port_info);
4754 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4756 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4757 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4760 ice_configure_phy(vsi);
4763 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4769 static int ice_init_pf_sw(struct ice_pf *pf)
4771 bool dvm = ice_is_dvm_ena(&pf->hw);
4772 struct ice_vsi *vsi;
4775 /* create switch struct for the switch element created by FW on boot */
4776 pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
4781 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4783 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4785 pf->first_sw->pf = pf;
4787 /* record the sw_id available for later use */
4788 pf->first_sw->sw_id = pf->hw.port_info->sw_id;
4790 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
4792 goto err_aq_set_port_params;
4794 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
4797 goto err_pf_vsi_setup;
4803 err_aq_set_port_params:
4804 kfree(pf->first_sw);
4808 static void ice_deinit_pf_sw(struct ice_pf *pf)
4810 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4815 ice_vsi_release(vsi);
4816 kfree(pf->first_sw);
4819 static int ice_alloc_vsis(struct ice_pf *pf)
4821 struct device *dev = ice_pf_to_dev(pf);
4823 pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
4824 if (!pf->num_alloc_vsi)
4827 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4829 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4830 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4831 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4834 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4839 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
4840 sizeof(*pf->vsi_stats), GFP_KERNEL);
4841 if (!pf->vsi_stats) {
4842 devm_kfree(dev, pf->vsi);
4849 static void ice_dealloc_vsis(struct ice_pf *pf)
4851 devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
4852 pf->vsi_stats = NULL;
4854 pf->num_alloc_vsi = 0;
4855 devm_kfree(ice_pf_to_dev(pf), pf->vsi);
4859 static int ice_init_devlink(struct ice_pf *pf)
4863 err = ice_devlink_register_params(pf);
4867 ice_devlink_init_regions(pf);
4868 ice_devlink_register(pf);
4873 static void ice_deinit_devlink(struct ice_pf *pf)
4875 ice_devlink_unregister(pf);
4876 ice_devlink_destroy_regions(pf);
4877 ice_devlink_unregister_params(pf);
4880 static int ice_init(struct ice_pf *pf)
4884 err = ice_init_dev(pf);
4888 err = ice_alloc_vsis(pf);
4890 goto err_alloc_vsis;
4892 err = ice_init_pf_sw(pf);
4894 goto err_init_pf_sw;
4896 ice_init_wakeup(pf);
4898 err = ice_init_link(pf);
4902 err = ice_send_version(pf);
4906 ice_verify_cacheline_size(pf);
4908 if (ice_is_safe_mode(pf))
4909 ice_set_safe_mode_vlan_cfg(pf);
4911 /* print PCI link speed and width */
4912 pcie_print_link_status(pf->pdev);
4914 /* ready to go, so clear down state bit */
4915 clear_bit(ICE_DOWN, pf->state);
4916 clear_bit(ICE_SERVICE_DIS, pf->state);
4918 /* since everything is good, start the service timer */
4919 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4924 ice_deinit_pf_sw(pf);
4926 ice_dealloc_vsis(pf);
4932 static void ice_deinit(struct ice_pf *pf)
4934 set_bit(ICE_SERVICE_DIS, pf->state);
4935 set_bit(ICE_DOWN, pf->state);
4937 ice_deinit_pf_sw(pf);
4938 ice_dealloc_vsis(pf);
4943 * ice_load - load the PF by initializing HW and starting the VSI
4944 * @pf: pointer to the PF instance
4946 int ice_load(struct ice_pf *pf)
4948 struct ice_vsi_cfg_params params = {};
4949 struct ice_vsi *vsi;
4952 err = ice_init_dev(pf);
4956 vsi = ice_get_main_vsi(pf);
4958 params = ice_vsi_to_params(vsi);
4959 params.flags = ICE_VSI_FLAG_INIT;
4962 err = ice_vsi_cfg(vsi, &params);
4966 err = ice_start_eth(ice_get_main_vsi(pf));
4971 err = ice_init_rdma(pf);
4975 ice_init_features(pf);
4976 ice_service_task_restart(pf);
4978 clear_bit(ICE_DOWN, pf->state);
4983 ice_vsi_close(ice_get_main_vsi(pf));
4986 ice_vsi_decfg(ice_get_main_vsi(pf));
4994 * ice_unload - unload the PF by stopping the VSI and deinitializing HW
4995 * @pf: pointer to the PF instance
4997 void ice_unload(struct ice_pf *pf)
4999 ice_deinit_features(pf);
5000 ice_deinit_rdma(pf);
5002 ice_stop_eth(ice_get_main_vsi(pf));
5003 ice_vsi_decfg(ice_get_main_vsi(pf));
5009 * ice_probe - Device initialization routine
5010 * @pdev: PCI device information struct
5011 * @ent: entry in ice_pci_tbl
5013 * Returns 0 on success, negative on failure
5016 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
5018 struct device *dev = &pdev->dev;
5023 if (pdev->is_virtfn) {
5024 dev_err(dev, "can't probe a virtual function\n");
5028 /* When running under a kdump kernel, initiate a reset before enabling the
5029 * device in order to clear out any pending DMA transactions. These
5030 * transactions can cause some systems to machine check when doing
5031 * the pcim_enable_device() below.
5033 if (is_kdump_kernel()) {
5034 pci_save_state(pdev);
5035 pci_clear_master(pdev);
5036 err = pcie_flr(pdev);
5039 pci_restore_state(pdev);
5042 /* this driver uses devres, see
5043 * Documentation/driver-api/driver-model/devres.rst
5045 err = pcim_enable_device(pdev);
5049 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
5051 dev_err(dev, "BAR0 I/O map error %d\n", err);
5055 pf = ice_allocate_pf(dev);
5059 /* initialize Auxiliary index to invalid value */
5062 /* set up for high or low DMA */
5063 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
5065 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
5069 pci_set_master(pdev);
5072 pci_set_drvdata(pdev, pf);
5073 set_bit(ICE_DOWN, pf->state);
5074 /* Disable service task until DOWN bit is cleared */
5075 set_bit(ICE_SERVICE_DIS, pf->state);
5078 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
5079 pci_save_state(pdev);
5082 hw->port_info = NULL;
5083 hw->vendor_id = pdev->vendor;
5084 hw->device_id = pdev->device;
5085 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
5086 hw->subsystem_vendor_id = pdev->subsystem_vendor;
5087 hw->subsystem_device_id = pdev->subsystem_device;
5088 hw->bus.device = PCI_SLOT(pdev->devfn);
5089 hw->bus.func = PCI_FUNC(pdev->devfn);
5090 ice_set_ctrlq_len(hw);
5092 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
5094 #ifndef CONFIG_DYNAMIC_DEBUG
5096 hw->debug_mask = debug;
5103 err = ice_init_eth(pf);
5107 err = ice_init_rdma(pf);
5111 err = ice_init_devlink(pf);
5113 goto err_init_devlink;
5115 ice_init_features(pf);
5120 ice_deinit_rdma(pf);
5126 pci_disable_device(pdev);
5131 * ice_set_wake - enable or disable Wake on LAN
5132 * @pf: pointer to the PF struct
5134 * Simple helper for WoL control
5136 static void ice_set_wake(struct ice_pf *pf)
5138 struct ice_hw *hw = &pf->hw;
5139 bool wol = pf->wol_ena;
5141 /* clear wake state, otherwise new wake events won't fire */
5142 wr32(hw, PFPM_WUS, U32_MAX);
5144 /* enable / disable APM wake up, no RMW needed */
5145 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5147 /* set magic packet filter enabled */
5148 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
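/* Illustrative note (not driver code): pf->wol_ena is normally driven
 * from ethtool, e.g. "ethtool -s eth0 wol g" to request magic-packet
 * wake or "ethtool -s eth0 wol d" to disable it; this helper then
 * programs PFPM_APM/PFPM_WUFC to match on the suspend/shutdown paths.
 */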
5152 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5153 * @pf: pointer to the PF struct
5155 * Issue firmware command to enable multicast magic wake, making
5156 * sure that any locally administered address (LAA) is used for
5157 * wake, and that PF reset doesn't undo the LAA.
5159 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5161 struct device *dev = ice_pf_to_dev(pf);
5162 struct ice_hw *hw = &pf->hw;
5163 u8 mac_addr[ETH_ALEN];
5164 struct ice_vsi *vsi;
5171 vsi = ice_get_main_vsi(pf);
5175 /* Get current MAC address in case it's an LAA */
5177 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5179 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5181 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5182 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5183 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5185 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5187 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5188 status, ice_aq_str(hw->adminq.sq_last_status));
5192 * ice_remove - Device removal routine
5193 * @pdev: PCI device information struct
5195 static void ice_remove(struct pci_dev *pdev)
5197 struct ice_pf *pf = pci_get_drvdata(pdev);
5200 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5201 if (!ice_is_reset_in_progress(pf->state))
5206 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5207 set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5211 ice_service_task_stop(pf);
5212 ice_aq_cancel_waiting_tasks(pf);
5213 set_bit(ICE_DOWN, pf->state);
5215 if (!ice_is_safe_mode(pf))
5216 ice_remove_arfs(pf);
5217 ice_deinit_features(pf);
5218 ice_deinit_devlink(pf);
5219 ice_deinit_rdma(pf);
5223 ice_vsi_release_all(pf);
5225 ice_setup_mc_magic_wake(pf);
5228 pci_disable_device(pdev);
5232 * ice_shutdown - PCI callback for shutting down device
5233 * @pdev: PCI device information struct
5235 static void ice_shutdown(struct pci_dev *pdev)
5237 struct ice_pf *pf = pci_get_drvdata(pdev);
5241 if (system_state == SYSTEM_POWER_OFF) {
5242 pci_wake_from_d3(pdev, pf->wol_ena);
5243 pci_set_power_state(pdev, PCI_D3hot);
5249 * ice_prepare_for_shutdown - prep for PCI shutdown
5250 * @pf: board private structure
5252 * Inform or close all dependent features in prep for PCI device shutdown
5254 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5256 struct ice_hw *hw = &pf->hw;
5259 /* Notify VFs of impending reset */
5260 if (ice_check_sq_alive(hw, &hw->mailboxq))
5261 ice_vc_notify_reset(pf);
5263 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5265 /* disable the VSIs and their queues that are not already DOWN */
5266 ice_pf_dis_all_vsi(pf, false);
5268 ice_for_each_vsi(pf, v)
5270 pf->vsi[v]->vsi_num = 0;
5272 ice_shutdown_all_ctrlq(hw);
5276 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5277 * @pf: board private structure to reinitialize
5279 * This routine reinitializes the interrupt scheme that was cleared during
5280 * the power management suspend callback.
5282 * This should be called during the resume routine to re-allocate the q_vectors
5283 * and reacquire interrupts.
5285 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5287 struct device *dev = ice_pf_to_dev(pf);
5290 /* Since we clear MSIX flag during suspend, we need to
5291 * set it back during resume...
5294 ret = ice_init_interrupt_scheme(pf);
5296 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5300 /* Remap vectors and rings after successfully re-initializing interrupts */
5301 ice_for_each_vsi(pf, v) {
5305 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5308 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5311 ret = ice_req_irq_msix_misc(pf);
5313 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5323 ice_vsi_free_q_vectors(pf->vsi[v]);
5330 * @dev: generic device information structure
5332 * Power Management callback to quiesce the device and prepare
5333 * for D3 transition.
5335 static int __maybe_unused ice_suspend(struct device *dev)
5337 struct pci_dev *pdev = to_pci_dev(dev);
5341 pf = pci_get_drvdata(pdev);
5343 if (!ice_pf_state_is_nominal(pf)) {
5344 dev_err(dev, "Device is not ready, no need to suspend it\n");
5348 /* Stop watchdog tasks until resume completion.
5349 * Even though it is most likely that the service task is
5350 * disabled if the device is suspended or down, the service task's
5351 * state is controlled by a different state bit, and we should
5352 * store and honor whatever state that bit is in at this point.
5354 disabled = ice_service_task_stop(pf);
5356 ice_unplug_aux_dev(pf);
5358 /* Already suspended? Then there is nothing to do */
5359 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5361 ice_service_task_restart(pf);
5365 if (test_bit(ICE_DOWN, pf->state) ||
5366 ice_is_reset_in_progress(pf->state)) {
5367 dev_err(dev, "can't suspend device in reset or already down\n");
5369 ice_service_task_restart(pf);
5373 ice_setup_mc_magic_wake(pf);
5375 ice_prepare_for_shutdown(pf);
5379 /* Free vectors, clear the interrupt scheme and release IRQs
5380 * for proper hibernation, especially with a large number of CPUs.
5381 * Otherwise hibernation might fail when mapping all the vectors back to the prior scheme.
5384 ice_free_irq_msix_misc(pf);
5385 ice_for_each_vsi(pf, v) {
5388 ice_vsi_free_q_vectors(pf->vsi[v]);
5390 ice_clear_interrupt_scheme(pf);
5392 pci_save_state(pdev);
5393 pci_wake_from_d3(pdev, pf->wol_ena);
5394 pci_set_power_state(pdev, PCI_D3hot);
5399 * ice_resume - PM callback for waking up from D3
5400 * @dev: generic device information structure
5402 static int __maybe_unused ice_resume(struct device *dev)
5404 struct pci_dev *pdev = to_pci_dev(dev);
5405 enum ice_reset_req reset_type;
5410 pci_set_power_state(pdev, PCI_D0);
5411 pci_restore_state(pdev);
5412 pci_save_state(pdev);
5414 if (!pci_device_is_present(pdev))
5417 ret = pci_enable_device_mem(pdev);
5419 dev_err(dev, "Cannot enable device after suspend\n");
5423 pf = pci_get_drvdata(pdev);
5426 pf->wakeup_reason = rd32(hw, PFPM_WUS);
5427 ice_print_wake_reason(pf);
5429 /* We cleared the interrupt scheme when we suspended, so we need to
5430 * restore it now to resume device functionality.
5432 ret = ice_reinit_interrupt_scheme(pf);
5434 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5436 clear_bit(ICE_DOWN, pf->state);
5437 /* Now perform PF reset and rebuild */
5438 reset_type = ICE_RESET_PFR;
5439 /* re-enable service task for reset, but allow reset to schedule it */
5440 clear_bit(ICE_SERVICE_DIS, pf->state);
5442 if (ice_schedule_reset(pf, reset_type))
5443 dev_err(dev, "Reset during resume failed.\n");
5445 clear_bit(ICE_SUSPENDED, pf->state);
5446 ice_service_task_restart(pf);
5448 /* Restart the service task */
5449 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5453 #endif /* CONFIG_PM */
5456 * ice_pci_err_detected - warning that PCI error has been detected
5457 * @pdev: PCI device information struct
5458 * @err: the type of PCI error
5460 * Called to warn that something happened on the PCI bus and the error handling
5461 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
5463 static pci_ers_result_t
5464 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5466 struct ice_pf *pf = pci_get_drvdata(pdev);
5469 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5471 return PCI_ERS_RESULT_DISCONNECT;
5474 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5475 ice_service_task_stop(pf);
5477 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5478 set_bit(ICE_PFR_REQ, pf->state);
5479 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5483 return PCI_ERS_RESULT_NEED_RESET;
5487 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5488 * @pdev: PCI device information struct
5490 * Called after a PCI slot reset to determine if the driver can recover,
5491 * using a register read to check if the device is recoverable.
5493 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5495 struct ice_pf *pf = pci_get_drvdata(pdev);
5496 pci_ers_result_t result;
5500 err = pci_enable_device_mem(pdev);
5502 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5504 result = PCI_ERS_RESULT_DISCONNECT;
5506 pci_set_master(pdev);
5507 pci_restore_state(pdev);
5508 pci_save_state(pdev);
5509 pci_wake_from_d3(pdev, false);
5511 /* Check for life */
5512 reg = rd32(&pf->hw, GLGEN_RTRIG);
5514 result = PCI_ERS_RESULT_RECOVERED;
5516 result = PCI_ERS_RESULT_DISCONNECT;
5523 * ice_pci_err_resume - restart operations after PCI error recovery
5524 * @pdev: PCI device information struct
5526 * Called to allow the driver to bring things back up after PCI error and/or
5527 * reset recovery have finished
5529 static void ice_pci_err_resume(struct pci_dev *pdev)
5531 struct ice_pf *pf = pci_get_drvdata(pdev);
5534 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5539 if (test_bit(ICE_SUSPENDED, pf->state)) {
5540 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5545 ice_restore_all_vfs_msi_state(pf);
5547 ice_do_reset(pf, ICE_RESET_PFR);
5548 ice_service_task_restart(pf);
5549 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5553 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5554 * @pdev: PCI device information struct
5556 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5558 struct ice_pf *pf = pci_get_drvdata(pdev);
5560 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5561 ice_service_task_stop(pf);
5563 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5564 set_bit(ICE_PFR_REQ, pf->state);
5565 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5571 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5572 * @pdev: PCI device information struct
5574 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5576 ice_pci_err_resume(pdev);
5579 /* ice_pci_tbl - PCI Device ID Table
5581 * Wildcard entries (PCI_ANY_ID) should come last
5582 * Last entry must be all 0s
5584 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5585 * Class, Class Mask, private data (not used) }
5587 static const struct pci_device_id ice_pci_tbl[] = {
5588 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) },
5589 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) },
5590 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) },
5591 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) },
5592 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) },
5593 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) },
5594 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) },
5595 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) },
5596 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) },
5597 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) },
5598 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) },
5599 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) },
5600 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) },
5601 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) },
5602 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) },
5603 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) },
5604 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) },
5605 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) },
5606 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) },
5607 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) },
5608 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) },
5609 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) },
5610 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) },
5611 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
5612 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
5613 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
5614 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) },
5615 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) },
5616 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) },
5617 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP_DD) },
5618 /* required last entry */
5621 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
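/* Illustrative note (not driver code): MODULE_DEVICE_TABLE() exports
 * the IDs above as module aliases (listed by "modinfo ice"), which is
 * what lets udev/kmod autoload this driver when a matching PCI device
 * is discovered.
 */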
5623 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5625 static const struct pci_error_handlers ice_pci_err_handler = {
5626 .error_detected = ice_pci_err_detected,
5627 .slot_reset = ice_pci_err_slot_reset,
5628 .reset_prepare = ice_pci_err_reset_prepare,
5629 .reset_done = ice_pci_err_reset_done,
5630 .resume = ice_pci_err_resume
5633 static struct pci_driver ice_driver = {
5634 .name = KBUILD_MODNAME,
5635 .id_table = ice_pci_tbl,
5637 .remove = ice_remove,
5639 .driver.pm = &ice_pm_ops,
5640 #endif /* CONFIG_PM */
5641 .shutdown = ice_shutdown,
5642 .sriov_configure = ice_sriov_configure,
5643 .sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
5644 .sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count,
5645 .err_handler = &ice_pci_err_handler
5649 * ice_module_init - Driver registration routine
5651 * ice_module_init is the first routine called when the driver is
5652 * loaded. All it does is register with the PCI subsystem.
5654 static int __init ice_module_init(void)
5656 int status = -ENOMEM;
5658 pr_info("%s\n", ice_driver_string);
5659 pr_info("%s\n", ice_copyright);
5661 ice_adv_lnk_speed_maps_init();
5663 ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
5665 pr_err("Failed to create workqueue\n");
5669 ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
5671 pr_err("Failed to create LAG workqueue\n");
5675 status = pci_register_driver(&ice_driver);
5677 pr_err("failed to register PCI driver, err %d\n", status);
5678 goto err_dest_lag_wq;
5684 destroy_workqueue(ice_lag_wq);
5686 destroy_workqueue(ice_wq);
5689 module_init(ice_module_init);
5692 * ice_module_exit - Driver exit cleanup routine
5694 * ice_module_exit is called just before the driver is removed from memory.
5697 static void __exit ice_module_exit(void)
5699 pci_unregister_driver(&ice_driver);
5700 destroy_workqueue(ice_wq);
5701 destroy_workqueue(ice_lag_wq);
5702 pr_info("module unloaded\n");
5704 module_exit(ice_module_exit);
5707 * ice_set_mac_address - NDO callback to set MAC address
5708 * @netdev: network interface device structure
5709 * @pi: pointer to an address structure
5711 * Returns 0 on success, negative on failure
5713 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5715 struct ice_netdev_priv *np = netdev_priv(netdev);
5716 struct ice_vsi *vsi = np->vsi;
5717 struct ice_pf *pf = vsi->back;
5718 struct ice_hw *hw = &pf->hw;
5719 struct sockaddr *addr = pi;
5720 u8 old_mac[ETH_ALEN];
5725 mac = (u8 *)addr->sa_data;
5727 if (!is_valid_ether_addr(mac))
5728 return -EADDRNOTAVAIL;
5730 if (test_bit(ICE_DOWN, pf->state) ||
5731 ice_is_reset_in_progress(pf->state)) {
5732 netdev_err(netdev, "can't set mac %pM. device not ready\n",
5737 if (ice_chnl_dmac_fltr_cnt(pf)) {
5738 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5743 netif_addr_lock_bh(netdev);
5744 ether_addr_copy(old_mac, netdev->dev_addr);
5745 /* change the netdev's MAC address */
5746 eth_hw_addr_set(netdev, mac);
5747 netif_addr_unlock_bh(netdev);
5749 /* Clean up old MAC filter. Not an error if old filter doesn't exist */
5750 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5751 if (err && err != -ENOENT) {
5752 err = -EADDRNOTAVAIL;
5753 goto err_update_filters;
5756 /* Add filter for new MAC. If filter exists, return success */
5757 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5758 if (err == -EEXIST) {
5759 /* Although this MAC filter is already present in hardware it's
5760 * possible in some cases (e.g. bonding) that dev_addr was
5761 * modified outside of the driver and needs to be restored back to this value.
5764 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5768 /* error if the new filter addition failed */
5769 err = -EADDRNOTAVAIL;
5774 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5776 netif_addr_lock_bh(netdev);
5777 eth_hw_addr_set(netdev, old_mac);
5778 netif_addr_unlock_bh(netdev);
5782 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5785 /* write new MAC address to the firmware */
5786 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5787 err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5789 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
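/* Illustrative note (not driver code): this NDO is reached through
 * dev_set_mac_address(), e.g. "ip link set dev eth0 address
 * 02:11:22:33:44:55"; the sequence above removes the old unicast
 * filter, installs the new one, and rolls dev_addr back on failure.
 */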
5796 * ice_set_rx_mode - NDO callback to set the netdev filters
5797 * @netdev: network interface device structure
5799 static void ice_set_rx_mode(struct net_device *netdev)
5801 struct ice_netdev_priv *np = netdev_priv(netdev);
5802 struct ice_vsi *vsi = np->vsi;
5804 if (!vsi || ice_is_switchdev_running(vsi->back))
5807 /* Set the flags to synchronize filters
5808 * ndo_set_rx_mode may be triggered even without a change in netdev flags
5811 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5812 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5813 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5815 /* schedule our worker thread which will take care of
5816 * applying the new filter changes
5818 ice_service_task_schedule(vsi->back);
5822 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5823 * @netdev: network interface device structure
5824 * @queue_index: Queue ID
5825 * @maxrate: maximum bandwidth in Mbps
5828 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5830 struct ice_netdev_priv *np = netdev_priv(netdev);
5831 struct ice_vsi *vsi = np->vsi;
5836 /* Validate maxrate requested is within permitted range */
5837 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5838 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5839 maxrate, queue_index);
5843 q_handle = vsi->tx_rings[queue_index]->q_handle;
5844 tc = ice_dcb_get_tc(vsi, queue_index);
5846 vsi = ice_locate_vsi_using_queue(vsi, queue_index);
5848 netdev_err(netdev, "Invalid VSI for given queue %d\n",
5853 /* Set BW back to default when the user sets maxrate to 0 */
5855 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5856 q_handle, ICE_MAX_BW);
5858 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5859 q_handle, ICE_MAX_BW, maxrate * 1000);
5861 netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
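/* Illustrative note (not driver code): this NDO backs the per-queue
 * sysfs knob, so e.g. "echo 500 > /sys/class/net/eth0/queues/tx-3/tx_maxrate"
 * requests a 500 Mbps cap on Tx queue 3, and writing 0 restores the
 * default (uncapped) limit via ice_cfg_q_bw_dflt_lmt().
 */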
5868 * ice_fdb_add - add an entry to the hardware database
5869 * @ndm: the input from the stack
5870 * @tb: pointer to array of nladdr (unused)
5871 * @dev: the net device pointer
5872 * @addr: the MAC address entry being added
5874 * @flags: instructions from stack about fdb operation
5875 * @extack: netlink extended ack
5878 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5879 struct net_device *dev, const unsigned char *addr, u16 vid,
5880 u16 flags, struct netlink_ext_ack __always_unused *extack)
5885 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5888 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5889 netdev_err(dev, "FDB only supports static addresses\n");
5893 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5894 err = dev_uc_add_excl(dev, addr);
5895 else if (is_multicast_ether_addr(addr))
5896 err = dev_mc_add_excl(dev, addr);
5900 /* Only return duplicate errors if NLM_F_EXCL is set */
5901 if (err == -EEXIST && !(flags & NLM_F_EXCL))
5908 * ice_fdb_del - delete an entry from the hardware database
5909 * @ndm: the input from the stack
5910 * @tb: pointer to array of nladdr (unused)
5911 * @dev: the net device pointer
5912 * @addr: the MAC address entry being removed
5914 * @extack: netlink extended ack
5917 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5918 struct net_device *dev, const unsigned char *addr,
5919 __always_unused u16 vid, struct netlink_ext_ack *extack)
5923 if (ndm->ndm_state & NUD_PERMANENT) {
5924 netdev_err(dev, "FDB only supports static addresses\n");
5928 if (is_unicast_ether_addr(addr))
5929 err = dev_uc_del(dev, addr);
5930 else if (is_multicast_ether_addr(addr))
5931 err = dev_mc_del(dev, addr);
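/* Illustrative note (not driver code): these handlers are reached via
 * the rtnetlink FDB path used by iproute2, e.g.
 * "bridge fdb add 00:11:22:33:44:55 dev eth0 self" and the matching
 * "bridge fdb del"; VLAN-qualified adds are rejected above.
 */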
5938 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5939 NETIF_F_HW_VLAN_CTAG_TX | \
5940 NETIF_F_HW_VLAN_STAG_RX | \
5941 NETIF_F_HW_VLAN_STAG_TX)
5943 #define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5944 NETIF_F_HW_VLAN_STAG_RX)
5946 #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
5947 NETIF_F_HW_VLAN_STAG_FILTER)
5950 * ice_fix_features - fix the netdev features flags based on device limitations
5951 * @netdev: ptr to the netdev that flags are being fixed on
5952 * @features: features that need to be checked and possibly fixed
5954 * Make sure any fixups are made to features in this callback. This enables the
5955 * driver to not have to check unsupported configurations throughout the driver
5956 * because that's the responsibility of this callback.
5958 * Single VLAN Mode (SVM) Supported Features:
5959 * NETIF_F_HW_VLAN_CTAG_FILTER
5960 * NETIF_F_HW_VLAN_CTAG_RX
5961 * NETIF_F_HW_VLAN_CTAG_TX
5963 * Double VLAN Mode (DVM) Supported Features:
5964 * NETIF_F_HW_VLAN_CTAG_FILTER
5965 * NETIF_F_HW_VLAN_CTAG_RX
5966 * NETIF_F_HW_VLAN_CTAG_TX
5968 * NETIF_F_HW_VLAN_STAG_FILTER
5969 * NETIF_F_HW_VLAN_STAG_RX
5970 * NETIF_F_HW_VLAN_STAG_TX
5972 * Features that need fixing:
5973 * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
5974 * These are mutually exclusive as the VSI context cannot support multiple
5975 * VLAN ethertypes simultaneously for stripping and/or insertion. If this
5976 * is not done, then default to clearing the requested STAG offload settings.
5979 * All supported filtering has to be enabled or disabled together. For
5980 * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
5981 * together. If this is not done, then default to VLAN filtering disabled.
5982 * These are mutually exclusive as there is currently no way to
5983 * enable/disable VLAN filtering based on VLAN ethertype when using VLAN filters in HW.
5986 static netdev_features_t
5987 ice_fix_features(struct net_device *netdev, netdev_features_t features)
5989 struct ice_netdev_priv *np = netdev_priv(netdev);
5990 netdev_features_t req_vlan_fltr, cur_vlan_fltr;
5991 bool cur_ctag, cur_stag, req_ctag, req_stag;
5993 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
5994 cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5995 cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5997 req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
5998 req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5999 req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6001 if (req_vlan_fltr != cur_vlan_fltr) {
6002 if (ice_is_dvm_ena(&np->vsi->back->hw)) {
6003 if (req_ctag && req_stag) {
6004 features |= NETIF_VLAN_FILTERING_FEATURES;
6005 } else if (!req_ctag && !req_stag) {
6006 features &= ~NETIF_VLAN_FILTERING_FEATURES;
6007 } else if ((!cur_ctag && req_ctag && !cur_stag) ||
6008 (!cur_stag && req_stag && !cur_ctag)) {
6009 features |= NETIF_VLAN_FILTERING_FEATURES;
6010 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
6011 } else if ((cur_ctag && !req_ctag && cur_stag) ||
6012 (cur_stag && !req_stag && cur_ctag)) {
6013 features &= ~NETIF_VLAN_FILTERING_FEATURES;
6014 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
6017 if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
6018 netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
6020 if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
6021 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6025 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
6026 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
6027 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
6028 features &= ~(NETIF_F_HW_VLAN_STAG_RX |
6029 NETIF_F_HW_VLAN_STAG_TX);
6032 if (!(netdev->features & NETIF_F_RXFCS) &&
6033 (features & NETIF_F_RXFCS) &&
6034 (features & NETIF_VLAN_STRIPPING_FEATURES) &&
6035 !ice_vsi_has_non_zero_vlans(np->vsi)) {
6036 netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
6037 features &= ~NETIF_VLAN_STRIPPING_FEATURES;
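/* Illustrative note (not driver code): in ethtool terms the fixup
 * above means that, in DVM, e.g. "ethtool -K eth0 rx-vlan-filter off"
 * also turns off rx-vlan-stag-filter (and vice versa), since CTAG and
 * STAG filtering can only be enabled or disabled together.
 */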
6044 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6046 * @features: features used to determine VLAN offload settings
6048 * First, determine the vlan_ethertype based on the VLAN offload bits in
6049 * features. Then determine if stripping and insertion should be enabled or
6050 * disabled. Finally enable or disable VLAN stripping and insertion.
6053 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
6055 bool enable_stripping = true, enable_insertion = true;
6056 struct ice_vsi_vlan_ops *vlan_ops;
6057 int strip_err = 0, insert_err = 0;
6058 u16 vlan_ethertype = 0;
6060 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6062 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
6063 vlan_ethertype = ETH_P_8021AD;
6064 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
6065 vlan_ethertype = ETH_P_8021Q;
6067 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
6068 enable_stripping = false;
6069 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
6070 enable_insertion = false;
6072 if (enable_stripping)
6073 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
6075 strip_err = vlan_ops->dis_stripping(vsi);
6077 if (enable_insertion)
6078 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
6080 insert_err = vlan_ops->dis_insertion(vsi);
6082 if (strip_err || insert_err)
6089 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6091 * @features: features used to determine VLAN filtering settings
6093 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
6097 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
6099 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6102 /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
6103 * if either bit is set
6106 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
6107 err = vlan_ops->ena_rx_filtering(vsi);
6109 err = vlan_ops->dis_rx_filtering(vsi);
6115 * ice_set_vlan_features - set VLAN settings based on suggested feature set
6116 * @netdev: ptr to the netdev being adjusted
6117 * @features: the feature set that the stack is suggesting
6119 * Only update VLAN settings if the requested_vlan_features are different from
6120 * the current_vlan_features.
6123 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6125 netdev_features_t current_vlan_features, requested_vlan_features;
6126 struct ice_netdev_priv *np = netdev_priv(netdev);
6127 struct ice_vsi *vsi = np->vsi;
6130 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6131 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6132 if (current_vlan_features ^ requested_vlan_features) {
6133 if ((features & NETIF_F_RXFCS) &&
6134 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6135 dev_err(ice_pf_to_dev(vsi->back),
6136 "To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6140 err = ice_set_vlan_offload_features(vsi, features);
6145 current_vlan_features = netdev->features &
6146 NETIF_VLAN_FILTERING_FEATURES;
6147 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6148 if (current_vlan_features ^ requested_vlan_features) {
6149 err = ice_set_vlan_filtering_features(vsi, features);
6158 * ice_set_loopback - turn on/off loopback mode on underlying PF
6160 * @ena: flag to indicate the on/off setting
6162 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6164 bool if_running = netif_running(vsi->netdev);
6167 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6168 ret = ice_down(vsi);
6170 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6174 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6176 netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
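/* Illustrative note (not driver code): NETIF_F_LOOPBACK is exposed to
 * userspace as the "loopback" feature, so "ethtool -K eth0 loopback on"
 * reaches this helper through ice_set_features(); when the interface
 * is running, the VSI is brought down around the AQ call and back up.
 */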
6184 * ice_set_features - set the netdev feature flags
6185 * @netdev: ptr to the netdev being adjusted
6186 * @features: the feature set that the stack is suggesting
6189 ice_set_features(struct net_device *netdev, netdev_features_t features)
6191 netdev_features_t changed = netdev->features ^ features;
6192 struct ice_netdev_priv *np = netdev_priv(netdev);
6193 struct ice_vsi *vsi = np->vsi;
6194 struct ice_pf *pf = vsi->back;
6197 /* Don't set any netdev advanced features with device in Safe Mode */
6198 if (ice_is_safe_mode(pf)) {
6199 dev_err(ice_pf_to_dev(pf),
6200 "Device is in Safe Mode - not enabling advanced netdev features\n");
6204 /* Do not change setting during reset */
6205 if (ice_is_reset_in_progress(pf->state)) {
6206 dev_err(ice_pf_to_dev(pf),
6207 "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6211 /* Multiple features can be changed in one call so keep features in
6212 * separate if/else statements to guarantee each feature is checked
6214 if (changed & NETIF_F_RXHASH)
6215 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6217 ret = ice_set_vlan_features(netdev, features);
6221 /* Turn on receive of FCS aka CRC, and after setting this
6222 * flag the packet data will have the 4 byte CRC appended
6224 if (changed & NETIF_F_RXFCS) {
6225 if ((features & NETIF_F_RXFCS) &&
6226 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6227 dev_err(ice_pf_to_dev(vsi->back),
6228 "To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6232 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6233 ret = ice_down_up(vsi);
6238 if (changed & NETIF_F_NTUPLE) {
6239 bool ena = !!(features & NETIF_F_NTUPLE);
6241 ice_vsi_manage_fdir(vsi, ena);
6242 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6245 /* don't turn off hw_tc_offload when ADQ is already enabled */
6246 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6247 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6251 if (changed & NETIF_F_HW_TC) {
6252 bool ena = !!(features & NETIF_F_HW_TC);
6254 ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6255 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6258 if (changed & NETIF_F_LOOPBACK)
6259 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6265 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6266 * @vsi: VSI to setup VLAN properties for
6268 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6272 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6276 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6280 return ice_vsi_add_vlan_zero(vsi);
6284 * ice_vsi_cfg_lan - Setup the VSI lan related config
6285 * @vsi: the VSI being configured
6287 * Return 0 on success and negative value on error
6289 int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6293 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6294 ice_set_rx_mode(vsi->netdev);
6296 err = ice_vsi_vlan_setup(vsi);
6300 ice_vsi_cfg_dcb_rings(vsi);
6302 err = ice_vsi_cfg_lan_txqs(vsi);
6303 if (!err && ice_is_xdp_ena_vsi(vsi))
6304 err = ice_vsi_cfg_xdp_txqs(vsi);
6306 err = ice_vsi_cfg_rxqs(vsi);
6311 /* THEORY OF MODERATION:
6312 * The ice driver hardware works differently from the hardware that DIMLIB was
6313 * originally made for. ice hardware doesn't have packet count limits that
6314 * can trigger an interrupt, but it *does* have interrupt rate limit support,
6315 * which is hard-coded to a limit of 250,000 ints/second.
6316 * If not using dynamic moderation, the INTRL value can be modified
6317 * by ethtool rx-usecs-high.
6320 /* the throttle rate for interrupts, basically the worst-case delay before
6321 * an initial interrupt fires; the value is stored in microseconds.
6326 /* Make a different profile for Rx that doesn't allow quite so aggressive
6327 * moderation at the high end (it maxes out at 126us or about 8k interrupts a second).
6330 static const struct ice_dim rx_profile[] = {
6331 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6332 {8}, /* 125,000 ints/s */
6333 {16}, /* 62,500 ints/s */
6334 {62}, /* 16,129 ints/s */
6335 {126} /* 7,936 ints/s */
6338 /* The transmit profile, which has the same sorts of values
6339 * as the previous struct
6341 static const struct ice_dim tx_profile[] = {
6342 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6343 {8}, /* 125,000 ints/s */
6344 {40}, /* 25,000 ints/s */
6345 {128}, /* 7,812 ints/s */
6346 {256} /* 3,906 ints/s */
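/* Illustrative arithmetic (not driver code): each entry above is an
 * ITR interval in microseconds, so the resulting rate is roughly
 * 1,000,000 / itr_us; e.g. {62} -> ~16,129 ints/s and {126} ->
 * ~7,936 ints/s, while the {2} entries are capped at 250K by INTRL.
 */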
6349 static void ice_tx_dim_work(struct work_struct *work)
6351 struct ice_ring_container *rc;
6355 dim = container_of(work, struct dim, work);
6358 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6360 /* look up the values in our local table */
6361 itr = tx_profile[dim->profile_ix].itr;
6363 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6364 ice_write_itr(rc, itr);
6366 dim->state = DIM_START_MEASURE;
6369 static void ice_rx_dim_work(struct work_struct *work)
6371 struct ice_ring_container *rc;
6375 dim = container_of(work, struct dim, work);
6378 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6380 /* look up the values in our local table */
6381 itr = rx_profile[dim->profile_ix].itr;
6383 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6384 ice_write_itr(rc, itr);
6386 dim->state = DIM_START_MEASURE;
6389 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6392 * ice_init_moderation - set up interrupt moderation
6393 * @q_vector: the vector containing rings to be configured
6395 * Set up interrupt moderation registers, with the intent to do the right thing
6396 * when called from reset or from probe, whether or not dynamic moderation
6397 * is enabled. Take special care to write all the registers in both
6398 * modes in order to make sure hardware is in a known state.
6401 static void ice_init_moderation(struct ice_q_vector *q_vector)
6403 struct ice_ring_container *rc;
6404 bool tx_dynamic, rx_dynamic;
6407 INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6408 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6409 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6411 tx_dynamic = ITR_IS_DYNAMIC(rc);
6413 /* set the initial TX ITR to match the above */
6414 ice_write_itr(rc, tx_dynamic ?
6415 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6418 INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6419 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6420 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6422 rx_dynamic = ITR_IS_DYNAMIC(rc);
6424 /* set the initial RX ITR to match the above */
6425 ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6428 ice_set_q_vector_intrl(q_vector);
6432 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6433 * @vsi: the VSI being configured
6435 static void ice_napi_enable_all(struct ice_vsi *vsi)
6442 ice_for_each_q_vector(vsi, q_idx) {
6443 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6445 ice_init_moderation(q_vector);
6447 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6448 napi_enable(&q_vector->napi);
6453 * ice_up_complete - Finish the last steps of bringing up a connection
6454 * @vsi: The VSI being configured
6456 * Return 0 on success and negative value on error
6458 static int ice_up_complete(struct ice_vsi *vsi)
6460 struct ice_pf *pf = vsi->back;
6463 ice_vsi_cfg_msix(vsi);
6465 /* Enable only Rx rings; Tx rings were enabled by the FW when the
6466 * Tx queue group list was configured and the context bits were
6467 * programmed using ice_vsi_cfg_txqs
6469 err = ice_vsi_start_all_rx_rings(vsi);
6473 clear_bit(ICE_VSI_DOWN, vsi->state);
6474 ice_napi_enable_all(vsi);
6475 ice_vsi_ena_irq(vsi);
6477 if (vsi->port_info &&
6478 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6479 vsi->netdev && vsi->type == ICE_VSI_PF) {
6480 ice_print_link_msg(vsi, true);
6481 netif_tx_start_all_queues(vsi->netdev);
6482 netif_carrier_on(vsi->netdev);
6483 ice_ptp_link_change(pf, pf->hw.pf_id, true);
6486 /* Perform an initial read of the statistics registers now to
6487 * set the baseline so counters are ready when the interface is up
6489 ice_update_eth_stats(vsi);
6491 if (vsi->type == ICE_VSI_PF)
6492 ice_service_task_schedule(pf);
6498 * ice_up - Bring the connection back up after being down
6499 * @vsi: VSI being configured
6501 int ice_up(struct ice_vsi *vsi)
6505 err = ice_vsi_cfg_lan(vsi);
6507 err = ice_up_complete(vsi);
6513 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6514 * @syncp: pointer to u64_stats_sync
6515 * @stats: stats that pkts and bytes count will be taken from
6516 * @pkts: packets stats counter
6517 * @bytes: bytes stats counter
6519 * This function fetches stats from the ring considering the atomic operations
6520 * that need to be performed to read u64 values on a 32-bit machine.
6523 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6524 struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6529 start = u64_stats_fetch_begin(syncp);
6531 *bytes = stats.bytes;
6532 } while (u64_stats_fetch_retry(syncp, start));
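/* Illustrative note (not driver code): on 32-bit kernels a u64 load
 * is two 32-bit loads and can tear against a concurrent writer, so
 * u64_stats_fetch_begin()/u64_stats_fetch_retry() sample the seqcount
 * and reread until a consistent snapshot is seen; on 64-bit builds
 * these helpers compile away to plain reads.
 */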
6536 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6537 * @vsi: the VSI to be updated
6538 * @vsi_stats: the stats struct to be updated
6539 * @rings: rings to work on
6540 * @count: number of rings
6543 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6544 struct rtnl_link_stats64 *vsi_stats,
6545 struct ice_tx_ring **rings, u16 count)
6549 for (i = 0; i < count; i++) {
6550 struct ice_tx_ring *ring;
6551 u64 pkts = 0, bytes = 0;
6553 ring = READ_ONCE(rings[i]);
6554 if (!ring || !ring->ring_stats)
6556 ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6557 ring->ring_stats->stats, &pkts,
6559 vsi_stats->tx_packets += pkts;
6560 vsi_stats->tx_bytes += bytes;
6561 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6562 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6563 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6568 * ice_update_vsi_ring_stats - Update VSI stats counters
6569 * @vsi: the VSI to be updated
6571 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6573 struct rtnl_link_stats64 *net_stats, *stats_prev;
6574 struct rtnl_link_stats64 *vsi_stats;
6578 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6582 /* reset non-netdev (extended) stats */
6583 vsi->tx_restart = 0;
6585 vsi->tx_linearize = 0;
6586 vsi->rx_buf_failed = 0;
6587 vsi->rx_page_failed = 0;
6591 /* update Tx rings counters */
6592 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6595 /* update Rx rings counters */
6596 ice_for_each_rxq(vsi, i) {
6597 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6598 struct ice_ring_stats *ring_stats;
6600 ring_stats = ring->ring_stats;
6601 ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6602 ring_stats->stats, &pkts,
6604 vsi_stats->rx_packets += pkts;
6605 vsi_stats->rx_bytes += bytes;
6606 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6607 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6610 /* update XDP Tx rings counters */
6611 if (ice_is_xdp_ena_vsi(vsi))
6612 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6617 net_stats = &vsi->net_stats;
6618 stats_prev = &vsi->net_stats_prev;
6620 /* clear prev counters after reset */
6621 if (vsi_stats->tx_packets < stats_prev->tx_packets ||
6622 vsi_stats->rx_packets < stats_prev->rx_packets) {
6623 stats_prev->tx_packets = 0;
6624 stats_prev->tx_bytes = 0;
6625 stats_prev->rx_packets = 0;
6626 stats_prev->rx_bytes = 0;
6629 /* update netdev counters */
6630 net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6631 net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6632 net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6633 net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6635 stats_prev->tx_packets = vsi_stats->tx_packets;
6636 stats_prev->tx_bytes = vsi_stats->tx_bytes;
6637 stats_prev->rx_packets = vsi_stats->rx_packets;
6638 stats_prev->rx_bytes = vsi_stats->rx_bytes;
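/* Illustrative walk-through of the delta scheme above: if the rings now
 * report tx_packets = 1500 and the previous snapshot held 1000, then
 * net_stats->tx_packets grows by 500 and the snapshot advances to 1500.
 * Because the snapshot is zeroed whenever the ring totals go backwards
 * (see the reset check above), per-ring counter resets never make the
 * netdev counters decrease.
 */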
6644 * ice_update_vsi_stats - Update VSI stats counters
6645 * @vsi: the VSI to be updated
6647 void ice_update_vsi_stats(struct ice_vsi *vsi)
6649 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6650 struct ice_eth_stats *cur_es = &vsi->eth_stats;
6651 struct ice_pf *pf = vsi->back;
6653 if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6654 test_bit(ICE_CFG_BUSY, pf->state))
6657 /* get stats as recorded by Tx/Rx rings */
6658 ice_update_vsi_ring_stats(vsi);
6660 /* get VSI stats as recorded by the hardware */
6661 ice_update_eth_stats(vsi);
6663 cur_ns->tx_errors = cur_es->tx_errors;
6664 cur_ns->rx_dropped = cur_es->rx_discards;
6665 cur_ns->tx_dropped = cur_es->tx_discards;
6666 cur_ns->multicast = cur_es->rx_multicast;
6668 /* update some more netdev stats if this is main VSI */
6669 if (vsi->type == ICE_VSI_PF) {
6670 cur_ns->rx_crc_errors = pf->stats.crc_errors;
6671 cur_ns->rx_errors = pf->stats.crc_errors +
6672 pf->stats.illegal_bytes +
6673 pf->stats.rx_len_errors +
6674 pf->stats.rx_undersize +
6675 pf->hw_csum_rx_error +
6676 pf->stats.rx_jabber +
6677 pf->stats.rx_fragments +
6678 pf->stats.rx_oversize;
6679 cur_ns->rx_length_errors = pf->stats.rx_len_errors;
6680 /* record drops from the port level */
6681 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6686 * ice_update_pf_stats - Update PF port stats counters
 * @pf: PF whose stats need to be updated
 */
6689 void ice_update_pf_stats(struct ice_pf *pf)
6691 struct ice_hw_port_stats *prev_ps, *cur_ps;
6692 struct ice_hw *hw = &pf->hw;
6696 port = hw->port_info->lport;
6697 prev_ps = &pf->stats_prev;
6698 cur_ps = &pf->stats;
6700 if (ice_is_reset_in_progress(pf->state))
6701 pf->stat_prev_loaded = false;
6703 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6704 &prev_ps->eth.rx_bytes,
6705 &cur_ps->eth.rx_bytes);
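/* Note on the helpers used throughout this function: ice_stat_update40()
 * and ice_stat_update32() read a 40-bit or 32-bit hardware counter and
 * accumulate the (wraparound-safe) difference from *prev into *cur. When
 * pf->stat_prev_loaded is false - e.g. right after the reset check above -
 * they only re-baseline *prev, so stale hardware values are not counted.
 */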
6707 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6708 &prev_ps->eth.rx_unicast,
6709 &cur_ps->eth.rx_unicast);
6711 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6712 &prev_ps->eth.rx_multicast,
6713 &cur_ps->eth.rx_multicast);
6715 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6716 &prev_ps->eth.rx_broadcast,
6717 &cur_ps->eth.rx_broadcast);
6719 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6720 &prev_ps->eth.rx_discards,
6721 &cur_ps->eth.rx_discards);
6723 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6724 &prev_ps->eth.tx_bytes,
6725 &cur_ps->eth.tx_bytes);
6727 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6728 &prev_ps->eth.tx_unicast,
6729 &cur_ps->eth.tx_unicast);
6731 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6732 &prev_ps->eth.tx_multicast,
6733 &cur_ps->eth.tx_multicast);
6735 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6736 &prev_ps->eth.tx_broadcast,
6737 &cur_ps->eth.tx_broadcast);
6739 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
6740 &prev_ps->tx_dropped_link_down,
6741 &cur_ps->tx_dropped_link_down);
6743 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
6744 &prev_ps->rx_size_64, &cur_ps->rx_size_64);
6746 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
6747 &prev_ps->rx_size_127, &cur_ps->rx_size_127);
6749 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
6750 &prev_ps->rx_size_255, &cur_ps->rx_size_255);
6752 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
6753 &prev_ps->rx_size_511, &cur_ps->rx_size_511);
6755 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
6756 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
6758 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
6759 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
6761 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
6762 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
6764 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
6765 &prev_ps->tx_size_64, &cur_ps->tx_size_64);
6767 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
6768 &prev_ps->tx_size_127, &cur_ps->tx_size_127);
6770 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
6771 &prev_ps->tx_size_255, &cur_ps->tx_size_255);
6773 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
6774 &prev_ps->tx_size_511, &cur_ps->tx_size_511);
6776 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
6777 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
6779 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
6780 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
6782 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
6783 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
6785 fd_ctr_base = hw->fd_ctr_base;
6787 ice_stat_update40(hw,
6788 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
6789 pf->stat_prev_loaded, &prev_ps->fd_sb_match,
6790 &cur_ps->fd_sb_match);
6791 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
6792 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
6794 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
6795 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
6797 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
6798 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
6800 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
6801 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
6803 ice_update_dcb_stats(pf);
6805 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
6806 &prev_ps->crc_errors, &cur_ps->crc_errors);
6808 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
6809 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
6811 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
6812 &prev_ps->mac_local_faults,
6813 &cur_ps->mac_local_faults);
6815 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
6816 &prev_ps->mac_remote_faults,
6817 &cur_ps->mac_remote_faults);
6819 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
6820 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
6822 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
6823 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
6825 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
6826 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
6828 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
6829 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
6831 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
6832 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
6834 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6836 pf->stat_prev_loaded = true;
6840 * ice_get_stats64 - get statistics for network device structure
6841 * @netdev: network interface device structure
6842 * @stats: main device statistics structure
6845 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6847 struct ice_netdev_priv *np = netdev_priv(netdev);
6848 struct rtnl_link_stats64 *vsi_stats;
6849 struct ice_vsi *vsi = np->vsi;
6851 vsi_stats = &vsi->net_stats;
6853 if (!vsi->num_txq || !vsi->num_rxq)
	/* netdev packet/byte stats come from ring counters. These are obtained
	 * by summing up the ring counters (done by ice_update_vsi_ring_stats).
	 * But, only call the update routine and read the registers if VSI is
	 * not down.
	 */
6861 if (!test_bit(ICE_VSI_DOWN, vsi->state))
6862 ice_update_vsi_ring_stats(vsi);
6863 stats->tx_packets = vsi_stats->tx_packets;
6864 stats->tx_bytes = vsi_stats->tx_bytes;
6865 stats->rx_packets = vsi_stats->rx_packets;
6866 stats->rx_bytes = vsi_stats->rx_bytes;
6868 /* The rest of the stats can be read from the hardware but instead we
6869 * just return values that the watchdog task has already obtained from
6872 stats->multicast = vsi_stats->multicast;
6873 stats->tx_errors = vsi_stats->tx_errors;
6874 stats->tx_dropped = vsi_stats->tx_dropped;
6875 stats->rx_errors = vsi_stats->rx_errors;
6876 stats->rx_dropped = vsi_stats->rx_dropped;
6877 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6878 stats->rx_length_errors = vsi_stats->rx_length_errors;
6882 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6883 * @vsi: VSI having NAPI disabled
6885 static void ice_napi_disable_all(struct ice_vsi *vsi)
6892 ice_for_each_q_vector(vsi, q_idx) {
6893 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6895 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6896 napi_disable(&q_vector->napi);
6898 cancel_work_sync(&q_vector->tx.dim.work);
6899 cancel_work_sync(&q_vector->rx.dim.work);
6904 * ice_down - Shutdown the connection
6905 * @vsi: The VSI being stopped
 * Caller of this function is expected to set the vsi->state ICE_VSI_DOWN bit
 */
6909 int ice_down(struct ice_vsi *vsi)
6911 int i, tx_err, rx_err, vlan_err = 0;
6913 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
6915 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6916 vlan_err = ice_vsi_del_vlan_zero(vsi);
6917 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
6918 netif_carrier_off(vsi->netdev);
6919 netif_tx_disable(vsi->netdev);
6920 } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
6921 ice_eswitch_stop_all_tx_queues(vsi->back);
6924 ice_vsi_dis_irq(vsi);
6926 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6928 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
6929 vsi->vsi_num, tx_err);
6930 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6931 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6933 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
6934 vsi->vsi_num, tx_err);
6937 rx_err = ice_vsi_stop_all_rx_rings(vsi);
6939 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
6940 vsi->vsi_num, rx_err);
6942 ice_napi_disable_all(vsi);
6944 ice_for_each_txq(vsi, i)
6945 ice_clean_tx_ring(vsi->tx_rings[i]);
6947 if (ice_is_xdp_ena_vsi(vsi))
6948 ice_for_each_xdp_txq(vsi, i)
6949 ice_clean_tx_ring(vsi->xdp_rings[i]);
6951 ice_for_each_rxq(vsi, i)
6952 ice_clean_rx_ring(vsi->rx_rings[i]);
6954 if (tx_err || rx_err || vlan_err) {
6955 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6956 vsi->vsi_num, vsi->vsw->sw_id);
6964 * ice_down_up - shutdown the VSI connection and bring it up
6965 * @vsi: the VSI to be reconnected
6967 int ice_down_up(struct ice_vsi *vsi)
6971 /* if DOWN already set, nothing to do */
6972 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
6975 ret = ice_down(vsi);
6981 netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
6989 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6990 * @vsi: VSI having resources allocated
6992 * Return 0 on success, negative on failure
6994 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6998 if (!vsi->num_txq) {
6999 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
7004 ice_for_each_txq(vsi, i) {
7005 struct ice_tx_ring *ring = vsi->tx_rings[i];
7011 ring->netdev = vsi->netdev;
7012 err = ice_setup_tx_ring(ring);
7021 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
7022 * @vsi: VSI having resources allocated
7024 * Return 0 on success, negative on failure
7026 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
7030 if (!vsi->num_rxq) {
7031 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
7036 ice_for_each_rxq(vsi, i) {
7037 struct ice_rx_ring *ring = vsi->rx_rings[i];
7043 ring->netdev = vsi->netdev;
7044 err = ice_setup_rx_ring(ring);
7053 * ice_vsi_open_ctrl - open control VSI for use
7054 * @vsi: the VSI to open
7056 * Initialization of the Control VSI
7058 * Returns 0 on success, negative value on error
7060 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
7062 char int_name[ICE_INT_NAME_STR_LEN];
7063 struct ice_pf *pf = vsi->back;
7067 dev = ice_pf_to_dev(pf);
7068 /* allocate descriptors */
7069 err = ice_vsi_setup_tx_rings(vsi);
7073 err = ice_vsi_setup_rx_rings(vsi);
7077 err = ice_vsi_cfg_lan(vsi);
7081 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
7082 dev_driver_string(dev), dev_name(dev));
7083 err = ice_vsi_req_irq_msix(vsi, int_name);
7087 ice_vsi_cfg_msix(vsi);
7089 err = ice_vsi_start_all_rx_rings(vsi);
7091 goto err_up_complete;
7093 clear_bit(ICE_VSI_DOWN, vsi->state);
7094 ice_vsi_ena_irq(vsi);
7101 ice_vsi_free_rx_rings(vsi);
7103 ice_vsi_free_tx_rings(vsi);
7109 * ice_vsi_open - Called when a network interface is made active
7110 * @vsi: the VSI to open
7112 * Initialization of the VSI
7114 * Returns 0 on success, negative value on error
7116 int ice_vsi_open(struct ice_vsi *vsi)
7118 char int_name[ICE_INT_NAME_STR_LEN];
7119 struct ice_pf *pf = vsi->back;
7122 /* allocate descriptors */
7123 err = ice_vsi_setup_tx_rings(vsi);
7127 err = ice_vsi_setup_rx_rings(vsi);
7131 err = ice_vsi_cfg_lan(vsi);
7135 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7136 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7137 err = ice_vsi_req_irq_msix(vsi, int_name);
7141 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7143 if (vsi->type == ICE_VSI_PF) {
7144 /* Notify the stack of the actual queue counts. */
7145 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7149 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7154 err = ice_up_complete(vsi);
7156 goto err_up_complete;
7163 ice_vsi_free_irq(vsi);
7165 ice_vsi_free_rx_rings(vsi);
7167 ice_vsi_free_tx_rings(vsi);
7173 * ice_vsi_release_all - Delete all VSIs
7174 * @pf: PF from which all VSIs are being removed
7176 static void ice_vsi_release_all(struct ice_pf *pf)
7183 ice_for_each_vsi(pf, i) {
7187 if (pf->vsi[i]->type == ICE_VSI_CHNL)
7190 err = ice_vsi_release(pf->vsi[i]);
7192 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7193 i, err, pf->vsi[i]->vsi_num);
7198 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7199 * @pf: pointer to the PF instance
7200 * @type: VSI type to rebuild
7202 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7204 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7206 struct device *dev = ice_pf_to_dev(pf);
7209 ice_for_each_vsi(pf, i) {
7210 struct ice_vsi *vsi = pf->vsi[i];
7212 if (!vsi || vsi->type != type)
7215 /* rebuild the VSI */
7216 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
7218 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7219 err, vsi->idx, ice_vsi_type_str(type));
7223 /* replay filters for the VSI */
7224 err = ice_replay_vsi(&pf->hw, vsi->idx);
7226 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7227 err, vsi->idx, ice_vsi_type_str(type));
7231 /* Re-map HW VSI number, using VSI handle that has been
7232 * previously validated in ice_replay_vsi() call above
7234 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7236 /* enable the VSI */
7237 err = ice_ena_vsi(vsi, false);
7239 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7240 err, vsi->idx, ice_vsi_type_str(type));
7244 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7245 ice_vsi_type_str(type));
7252 * ice_update_pf_netdev_link - Update PF netdev link status
7253 * @pf: pointer to the PF instance
7255 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7260 ice_for_each_vsi(pf, i) {
7261 struct ice_vsi *vsi = pf->vsi[i];
7263 if (!vsi || vsi->type != ICE_VSI_PF)
7266 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7268 netif_carrier_on(pf->vsi[i]->netdev);
7269 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7271 netif_carrier_off(pf->vsi[i]->netdev);
7272 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7278 * ice_rebuild - rebuild after reset
7279 * @pf: PF to rebuild
7280 * @reset_type: type of reset
7282 * Do not rebuild VF VSI in this flow because that is already handled via
7283 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
 * PFR/CORER/GLOBR/etc. are different than the normal flow. Also, we don't want
 * to reset/rebuild all the VF VSIs twice.
 */
7287 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7289 struct device *dev = ice_pf_to_dev(pf);
7290 struct ice_hw *hw = &pf->hw;
7294 if (test_bit(ICE_DOWN, pf->state))
7295 goto clear_recovery;
7297 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7299 #define ICE_EMP_RESET_SLEEP_MS 5000
7300 if (reset_type == ICE_RESET_EMPR) {
7301 /* If an EMP reset has occurred, any previously pending flash
7302 * update will have completed. We no longer know whether or
7303 * not the NVM update EMP reset is restricted.
7305 pf->fw_emp_reset_disabled = false;
7307 msleep(ICE_EMP_RESET_SLEEP_MS);
7310 err = ice_init_all_ctrlq(hw);
7312 dev_err(dev, "control queues init failed %d\n", err);
7313 goto err_init_ctrlq;
7316 /* if DDP was previously loaded successfully */
7317 if (!ice_is_safe_mode(pf)) {
7318 /* reload the SW DB of filter tables */
7319 if (reset_type == ICE_RESET_PFR)
7320 ice_fill_blk_tbls(hw);
7322 /* Reload DDP Package after CORER/GLOBR reset */
7323 ice_load_pkg(NULL, pf);
7326 err = ice_clear_pf_cfg(hw);
7328 dev_err(dev, "clear PF configuration failed %d\n", err);
7329 goto err_init_ctrlq;
7332 ice_clear_pxe_mode(hw);
7334 err = ice_init_nvm(hw);
7336 dev_err(dev, "ice_init_nvm failed %d\n", err);
7337 goto err_init_ctrlq;
7340 err = ice_get_caps(hw);
7342 dev_err(dev, "ice_get_caps failed %d\n", err);
7343 goto err_init_ctrlq;
7346 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7348 dev_err(dev, "set_mac_cfg failed %d\n", err);
7349 goto err_init_ctrlq;
7352 dvm = ice_is_dvm_ena(hw);
7354 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7356 goto err_init_ctrlq;
7358 err = ice_sched_init_port(hw->port_info);
7360 goto err_sched_init_port;
7362 /* start misc vector */
7363 err = ice_req_irq_msix_misc(pf);
7365 dev_err(dev, "misc vector setup failed: %d\n", err);
7366 goto err_sched_init_port;
7369 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7370 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7371 if (!rd32(hw, PFQF_FD_SIZE)) {
7372 u16 unused, guar, b_effort;
7374 guar = hw->func_caps.fd_fltr_guar;
7375 b_effort = hw->func_caps.fd_fltr_best_effort;
7377 /* force guaranteed filter pool for PF */
7378 ice_alloc_fd_guar_item(hw, &unused, guar);
7379 /* force shared filter pool for PF */
7380 ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7384 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7385 ice_dcb_rebuild(pf);
	/* If the PF previously had enabled PTP, PTP init needs to happen before
	 * the VSI rebuild. If not, this causes the PTP link status events to
	 * fail to be re-enabled after the reset.
	 */
7391 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7394 if (ice_is_feature_supported(pf, ICE_F_GNSS))
7397 /* rebuild PF VSI */
7398 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7400 dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7401 goto err_vsi_rebuild;
7404 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
7406 dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
7407 goto err_vsi_rebuild;
7410 if (reset_type == ICE_RESET_PFR) {
7411 err = ice_rebuild_channels(pf);
7413 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7415 goto err_vsi_rebuild;
7419 /* If Flow Director is active */
7420 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7421 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7423 dev_err(dev, "control VSI rebuild failed: %d\n", err);
7424 goto err_vsi_rebuild;
7427 /* replay HW Flow Director recipes */
7429 ice_fdir_replay_flows(hw);
7431 /* replay Flow Director filters */
7432 ice_fdir_replay_fltrs(pf);
7434 ice_rebuild_arfs(pf);
7437 ice_update_pf_netdev_link(pf);
7439 /* tell the firmware we are up */
7440 err = ice_send_version(pf);
7442 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7444 goto err_vsi_rebuild;
7447 ice_replay_post(hw);
7449 /* if we get here, reset flow is successful */
7450 clear_bit(ICE_RESET_FAILED, pf->state);
7452 ice_plug_aux_dev(pf);
7453 if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
7454 ice_lag_rebuild(pf);
7456 /* Restore timestamp mode settings after VSI rebuild */
7457 ice_ptp_restore_timestamp_mode(pf);
7461 err_sched_init_port:
7462 ice_sched_cleanup_all(hw);
7464 ice_shutdown_all_ctrlq(hw);
7465 set_bit(ICE_RESET_FAILED, pf->state);
7467 /* set this bit in PF state to control service task scheduling */
7468 set_bit(ICE_NEEDS_RESTART, pf->state);
7469 dev_err(dev, "Rebuild failed, unload and reload driver\n");
7473 * ice_change_mtu - NDO callback to change the MTU
7474 * @netdev: network interface device structure
7475 * @new_mtu: new value for maximum frame size
7477 * Returns 0 on success, negative on failure
7479 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
7481 struct ice_netdev_priv *np = netdev_priv(netdev);
7482 struct ice_vsi *vsi = np->vsi;
7483 struct ice_pf *pf = vsi->back;
7484 struct bpf_prog *prog;
7488 if (new_mtu == (int)netdev->mtu) {
7489 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7493 prog = vsi->xdp_prog;
7494 if (prog && !prog->aux->xdp_has_frags) {
7495 int frame_size = ice_max_xdp_frame_size(vsi);
7497 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7498 netdev_err(netdev, "max MTU for XDP usage is %d\n",
7499 frame_size - ICE_ETH_PKT_HDR_PAD);
7502 } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
7503 if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
7504 netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
7505 ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
7510 /* if a reset is in progress, wait for some time for it to complete */
7512 if (ice_is_reset_in_progress(pf->state)) {
7514 usleep_range(1000, 2000);
7519 } while (count < 100);
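/* each retry sleeps 1-2 ms, so the bound of 100 iterations above gives a
 * pending reset roughly 100-200 ms to finish before we give up
 */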
7522 netdev_err(netdev, "can't change MTU. Device is busy\n");
7526 netdev->mtu = (unsigned int)new_mtu;
7527 err = ice_down_up(vsi);
7531 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7532 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7538 * ice_eth_ioctl - Access the hwtstamp interface
7539 * @netdev: network interface device structure
7540 * @ifr: interface request data
7541 * @cmd: ioctl command
7543 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7545 struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return ice_ptp_get_ts_config(pf, ifr);
	case SIOCSHWTSTAMP:
		return ice_ptp_set_ts_config(pf, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/**
7559 * ice_aq_str - convert AQ err code to a string
7560 * @aq_err: the AQ error code to convert
7562 const char *ice_aq_str(enum ice_aq_err aq_err)
7567 case ICE_AQ_RC_EPERM:
7568 return "ICE_AQ_RC_EPERM";
7569 case ICE_AQ_RC_ENOENT:
7570 return "ICE_AQ_RC_ENOENT";
7571 case ICE_AQ_RC_ENOMEM:
7572 return "ICE_AQ_RC_ENOMEM";
7573 case ICE_AQ_RC_EBUSY:
7574 return "ICE_AQ_RC_EBUSY";
7575 case ICE_AQ_RC_EEXIST:
7576 return "ICE_AQ_RC_EEXIST";
7577 case ICE_AQ_RC_EINVAL:
7578 return "ICE_AQ_RC_EINVAL";
7579 case ICE_AQ_RC_ENOSPC:
7580 return "ICE_AQ_RC_ENOSPC";
7581 case ICE_AQ_RC_ENOSYS:
7582 return "ICE_AQ_RC_ENOSYS";
7583 case ICE_AQ_RC_EMODE:
7584 return "ICE_AQ_RC_EMODE";
7585 case ICE_AQ_RC_ENOSEC:
7586 return "ICE_AQ_RC_ENOSEC";
7587 case ICE_AQ_RC_EBADSIG:
7588 return "ICE_AQ_RC_EBADSIG";
7589 case ICE_AQ_RC_ESVN:
7590 return "ICE_AQ_RC_ESVN";
7591 case ICE_AQ_RC_EBADMAN:
7592 return "ICE_AQ_RC_EBADMAN";
7593 case ICE_AQ_RC_EBADBUF:
7594 return "ICE_AQ_RC_EBADBUF";
7597 return "ICE_AQ_RC_UNKNOWN";
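/* Typical usage, as in the RSS helpers below: pair the numeric error from
 * an AQ call with the decoded admin queue status, e.g.:
 *
 *	dev_err(dev, "AQ call failed, err %d aq_err %s\n",
 *		status, ice_aq_str(hw->adminq.sq_last_status));
 */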
7601 * ice_set_rss_lut - Set RSS LUT
7602 * @vsi: Pointer to VSI structure
7603 * @lut: Lookup table
7604 * @lut_size: Lookup table size
7606 * Returns 0 on success, negative on failure
7608 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7610 struct ice_aq_get_set_rss_lut_params params = {};
7611 struct ice_hw *hw = &vsi->back->hw;
7617 params.vsi_handle = vsi->idx;
7618 params.lut_size = lut_size;
7619 params.lut_type = vsi->rss_lut_type;
7622 status = ice_aq_set_rss_lut(hw, ¶ms);
7624 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7625 status, ice_aq_str(hw->adminq.sq_last_status));
7631 * ice_set_rss_key - Set RSS key
7632 * @vsi: Pointer to the VSI structure
7633 * @seed: RSS hash seed
7635 * Returns 0 on success, negative on failure
7637 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7639 struct ice_hw *hw = &vsi->back->hw;
7645 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7647 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7648 status, ice_aq_str(hw->adminq.sq_last_status));
7654 * ice_get_rss_lut - Get RSS LUT
7655 * @vsi: Pointer to VSI structure
7656 * @lut: Buffer to store the lookup table entries
7657 * @lut_size: Size of buffer to store the lookup table entries
7659 * Returns 0 on success, negative on failure
7661 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7663 struct ice_aq_get_set_rss_lut_params params = {};
7664 struct ice_hw *hw = &vsi->back->hw;
7670 params.vsi_handle = vsi->idx;
7671 params.lut_size = lut_size;
7672 params.lut_type = vsi->rss_lut_type;
7675 status = ice_aq_get_rss_lut(hw, ¶ms);
7677 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7678 status, ice_aq_str(hw->adminq.sq_last_status));
7684 * ice_get_rss_key - Get RSS key
7685 * @vsi: Pointer to VSI structure
7686 * @seed: Buffer to store the key in
7688 * Returns 0 on success, negative on failure
7690 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7692 struct ice_hw *hw = &vsi->back->hw;
7698 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7700 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7701 status, ice_aq_str(hw->adminq.sq_last_status));
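/* A minimal sketch of a caller of the RSS helpers above (hypothetical
 * caller, buffer allocation and error handling elided): the LUT buffer is
 * vsi->rss_table_size bytes and the seed is ICE_VSIQF_HKEY_ARRAY_SIZE
 * bytes:
 *
 *	err = ice_set_rss_key(vsi, seed);
 *	if (!err)
 *		err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
 */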
7707 * ice_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process ID
 * @seq: RTNL message seq
7711 * @dev: the netdev being configured
7712 * @filter_mask: filter mask passed in
7713 * @nlflags: netlink flags passed in
 * Return the bridge mode (VEB/VEPA)
 */
static int
7718 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7719 struct net_device *dev, u32 filter_mask, int nlflags)
7721 struct ice_netdev_priv *np = netdev_priv(dev);
7722 struct ice_vsi *vsi = np->vsi;
7723 struct ice_pf *pf = vsi->back;
7726 bmode = pf->first_sw->bridge_mode;
7728 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7733 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7734 * @vsi: Pointer to VSI structure
7735 * @bmode: Hardware bridge mode (VEB/VEPA)
7737 * Returns 0 on success, negative on failure
7739 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7741 struct ice_aqc_vsi_props *vsi_props;
7742 struct ice_hw *hw = &vsi->back->hw;
7743 struct ice_vsi_ctx *ctxt;
7746 vsi_props = &vsi->info;
7748 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7752 ctxt->info = vsi->info;
7754 if (bmode == BRIDGE_MODE_VEB)
7755 /* change from VEPA to VEB mode */
7756 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7758 /* change from VEB to VEPA mode */
7759 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7760 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
7762 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7764 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
7765 bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
	/* Update sw flags for bookkeeping */
7769 vsi_props->sw_flags = ctxt->info.sw_flags;
7777 * ice_bridge_setlink - Set the hardware bridge mode
7778 * @dev: the netdev being configured
7779 * @nlh: RTNL message
7780 * @flags: bridge setlink flags
7781 * @extack: netlink extended ack
 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
 * not already set) for all VSIs connected to this switch, and also updates
 * the unicast switch filter rules for the corresponding switch of the netdev.
 */
static int
7789 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7790 u16 __always_unused flags,
7791 struct netlink_ext_ack __always_unused *extack)
7793 struct ice_netdev_priv *np = netdev_priv(dev);
7794 struct ice_pf *pf = np->vsi->back;
7795 struct nlattr *attr, *br_spec;
7796 struct ice_hw *hw = &pf->hw;
7797 struct ice_sw *pf_sw;
7798 int rem, v, err = 0;
7800 pf_sw = pf->first_sw;
7801 /* find the attribute in the netlink message */
7802 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7804 nla_for_each_nested(attr, br_spec, rem) {
7807 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7809 mode = nla_get_u16(attr);
7810 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7812 /* Continue if bridge mode is not being flipped */
7813 if (mode == pf_sw->bridge_mode)
7815 /* Iterates through the PF VSI list and update the loopback
7818 ice_for_each_vsi(pf, v) {
7821 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7826 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7827 /* Update the unicast switch filter rules for the corresponding
7828 * switch of the netdev
7830 err = ice_update_sw_rule_bridge_mode(hw);
7832 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
7834 ice_aq_str(hw->adminq.sq_last_status));
7835 /* revert hw->evb_veb */
7836 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7840 pf_sw->bridge_mode = mode;
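/* For context: this NDO is reached via the RTNL bridge interface, e.g.
 * (illustrative command, assuming a PF netdev named eth0):
 *
 *	bridge link set dev eth0 hwmode veb
 *
 * which is how an administrator flips the switch between VEB and VEPA.
 */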
7847 * ice_tx_timeout - Respond to a Tx Hang
7848 * @netdev: network interface device structure
7849 * @txqueue: Tx queue
7851 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
7853 struct ice_netdev_priv *np = netdev_priv(netdev);
7854 struct ice_tx_ring *tx_ring = NULL;
7855 struct ice_vsi *vsi = np->vsi;
7856 struct ice_pf *pf = vsi->back;
7859 pf->tx_timeout_count++;
	/* Check if PFC is enabled for the TC to which the queue belongs. If
	 * yes, then the Tx timeout is not caused by a hung queue; no need to
	 * reset and rebuild.
	 */
7865 if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7866 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7871 /* now that we have an index, find the tx_ring struct */
7872 ice_for_each_txq(vsi, i)
7873 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7874 if (txqueue == vsi->tx_rings[i]->q_index) {
7875 tx_ring = vsi->tx_rings[i];
7879 /* Reset recovery level if enough time has elapsed after last timeout.
7880 * Also ensure no new reset action happens before next timeout period.
7882 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7883 pf->tx_timeout_recovery_level = 1;
7884 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7885 netdev->watchdog_timeo)))
7889 struct ice_hw *hw = &pf->hw;
7892 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7893 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7894 /* Read interrupt register */
7895 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7897 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7898 vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7899 head, tx_ring->next_to_use, val);
7902 pf->tx_timeout_last_recovery = jiffies;
7903 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7904 pf->tx_timeout_recovery_level, txqueue);
7906 switch (pf->tx_timeout_recovery_level) {
7908 set_bit(ICE_PFR_REQ, pf->state);
7911 set_bit(ICE_CORER_REQ, pf->state);
7914 set_bit(ICE_GLOBR_REQ, pf->state);
7917 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7918 set_bit(ICE_DOWN, pf->state);
7919 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7920 set_bit(ICE_SERVICE_DIS, pf->state);
7924 ice_service_task_schedule(pf);
7925 pf->tx_timeout_recovery_level++;
7929 * ice_setup_tc_cls_flower - flower classifier offloads
7930 * @np: net device to configure
7931 * @filter_dev: device on which filter is added
7932 * @cls_flower: offload data
7935 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
7936 struct net_device *filter_dev,
7937 struct flow_cls_offload *cls_flower)
7939 struct ice_vsi *vsi = np->vsi;
7941 if (cls_flower->common.chain_index)
7944 switch (cls_flower->command) {
7945 case FLOW_CLS_REPLACE:
7946 return ice_add_cls_flower(filter_dev, vsi, cls_flower);
7947 case FLOW_CLS_DESTROY:
7948 return ice_del_cls_flower(vsi, cls_flower);
7955 * ice_setup_tc_block_cb - callback handler registered for TC block
7956 * @type: TC SETUP type
7957 * @type_data: TC flower offload data that contains user input
7958 * @cb_priv: netdev private data
7961 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
7963 struct ice_netdev_priv *np = cb_priv;
7966 case TC_SETUP_CLSFLOWER:
7967 return ice_setup_tc_cls_flower(np, np->vsi->netdev,
7975 * ice_validate_mqprio_qopt - Validate TCF input parameters
7976 * @vsi: Pointer to VSI
7977 * @mqprio_qopt: input parameters for mqprio queue configuration
 * This function validates the MQPRIO params, such as qcount (power of 2
 * wherever needed), and makes sure the user doesn't specify qcount and BW
 * rate limits for more TCs than "num_tc".
 */
static int
7984 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
7985 struct tc_mqprio_qopt_offload *mqprio_qopt)
7987 int non_power_of_2_qcount = 0;
7988 struct ice_pf *pf = vsi->back;
7989 int max_rss_q_cnt = 0;
7990 u64 sum_min_rate = 0;
7995 if (vsi->type != ICE_VSI_PF)
7998 if (mqprio_qopt->qopt.offset[0] != 0 ||
7999 mqprio_qopt->qopt.num_tc < 1 ||
8000 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
8003 dev = ice_pf_to_dev(pf);
8004 vsi->ch_rss_size = 0;
8005 num_tc = mqprio_qopt->qopt.num_tc;
8006 speed = ice_get_link_speed_kbps(vsi);
8008 for (i = 0; num_tc; i++) {
8009 int qcount = mqprio_qopt->qopt.count[i];
8010 u64 max_rate, min_rate, rem;
8015 if (is_power_of_2(qcount)) {
8016 if (non_power_of_2_qcount &&
8017 qcount > non_power_of_2_qcount) {
8018 dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
8019 qcount, non_power_of_2_qcount);
8022 if (qcount > max_rss_q_cnt)
8023 max_rss_q_cnt = qcount;
8025 if (non_power_of_2_qcount &&
8026 qcount != non_power_of_2_qcount) {
8027 dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
8028 qcount, non_power_of_2_qcount);
8031 if (qcount < max_rss_q_cnt) {
8032 dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
8033 qcount, max_rss_q_cnt);
8036 max_rss_q_cnt = qcount;
8037 non_power_of_2_qcount = qcount;
		/* TC command takes input in K/M/Gbps or K/M/Gbit etc but
		 * converts the bandwidth rate limit into Bytes/s when
		 * passing it down to the driver. So convert input bandwidth
		 * from Bytes/s to Kbps.
		 */
8045 max_rate = mqprio_qopt->max_rate[i];
8046 max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
8048 /* min_rate is minimum guaranteed rate and it can't be zero */
8049 min_rate = mqprio_qopt->min_rate[i];
8050 min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
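		/* Worked example of the conversions above, assuming
		 * ICE_BW_KBPS_DIVISOR is 125 (Bytes/s per Kbit/s):
		 * "max_rate 100Mbit" arrives from the stack as
		 * 12500000 Bytes/s, and dividing by 125 yields 100000 Kbps.
		 */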
8051 sum_min_rate += min_rate;
8053 if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
8054 dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
8055 min_rate, ICE_MIN_BW_LIMIT);
8059 if (max_rate && max_rate > speed) {
8060 dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
8061 i, max_rate, speed);
8065 iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
8067 dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
8068 i, ICE_MIN_BW_LIMIT);
8072 iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
8074 dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
8075 i, ICE_MIN_BW_LIMIT);
8079 /* min_rate can't be more than max_rate, except when max_rate
8080 * is zero (implies max_rate sought is max line rate). In such
8081 * a case min_rate can be more than max.
8083 if (max_rate && min_rate > max_rate) {
8084 dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
8085 min_rate, max_rate);
8089 if (i >= mqprio_qopt->qopt.num_tc - 1)
8091 if (mqprio_qopt->qopt.offset[i + 1] !=
8092 (mqprio_qopt->qopt.offset[i] + qcount))
8096 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8099 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8102 if (sum_min_rate && sum_min_rate > (u64)speed) {
8103 dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
8104 sum_min_rate, speed);
8108 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
8109 vsi->ch_rss_size = max_rss_q_cnt;
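/* Illustrative example of the qcount rules enforced above: with num_tc = 3
 * and count[] = {4, 4, 6}, the power-of-2 counts (4, 4) may not exceed the
 * single non-power-of-2 count (6), so the config is accepted and
 * ch_rss_size becomes 6. count[] = {4, 6, 5} is rejected because only one
 * distinct non-power-of-2 qcount is allowed.
 */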
8115 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
 * @pf: ptr to PF device
 * @vsi: ptr to VSI
 */
8119 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8121 struct device *dev = ice_pf_to_dev(pf);
8126 if (!(vsi->num_gfltr || vsi->num_bfltr))
8130 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8131 struct ice_fd_hw_prof *prof;
8135 if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8136 hw->fdir_prof[flow]->cnt))
8139 for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8140 enum ice_flow_priority prio;
8143 /* add this VSI to FDir profile for this flow */
8144 prio = ICE_FLOW_PRIO_NORMAL;
8145 prof = hw->fdir_prof[flow];
8146 prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
8147 status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
8148 prof->vsi_h[0], vsi->idx,
8149 prio, prof->fdir_seg[tun],
8152 dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8157 prof->entry_h[prof->cnt][tun] = entry_h;
8160 /* store VSI for filter replay and delete */
8161 prof->vsi_h[prof->cnt] = vsi->idx;
8165 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8170 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8176 * ice_add_channel - add a channel by adding VSI
8177 * @pf: ptr to PF device
8178 * @sw_id: underlying HW switching element ID
8179 * @ch: ptr to channel structure
8181 * Add a channel (VSI) using add_vsi and queue_map
8183 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8185 struct device *dev = ice_pf_to_dev(pf);
8186 struct ice_vsi *vsi;
8188 if (ch->type != ICE_VSI_CHNL) {
8189 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8193 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8194 if (!vsi || vsi->type != ICE_VSI_CHNL) {
8195 dev_err(dev, "create chnl VSI failure\n");
8199 ice_add_vsi_to_fdir(pf, vsi);
8202 ch->vsi_num = vsi->vsi_num;
8203 ch->info.mapping_flags = vsi->info.mapping_flags;
8205 /* set the back pointer of channel for newly created VSI */
8208 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8209 sizeof(vsi->info.q_mapping));
8210 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8211 sizeof(vsi->info.tc_mapping));
	return 0;
}

/**
 * ice_chnl_cfg_res - Configure channel specific resources
 * @vsi: the VSI being setup
8219 * @ch: ptr to channel structure
 * Configure channel-specific resources such as rings and vectors.
 */
8223 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8227 for (i = 0; i < ch->num_txq; i++) {
8228 struct ice_q_vector *tx_q_vector, *rx_q_vector;
8229 struct ice_ring_container *rc;
8230 struct ice_tx_ring *tx_ring;
8231 struct ice_rx_ring *rx_ring;
8233 tx_ring = vsi->tx_rings[ch->base_q + i];
8234 rx_ring = vsi->rx_rings[ch->base_q + i];
8235 if (!tx_ring || !rx_ring)
		/* mark the rings as channel enabled */
		tx_ring->ch = ch;
		rx_ring->ch = ch;
8242 /* following code block sets up vector specific attributes */
8243 tx_q_vector = tx_ring->q_vector;
8244 rx_q_vector = rx_ring->q_vector;
8245 if (!tx_q_vector && !rx_q_vector)
8249 tx_q_vector->ch = ch;
8250 /* setup Tx and Rx ITR setting if DIM is off */
8251 rc = &tx_q_vector->tx;
8252 if (!ITR_IS_DYNAMIC(rc))
8253 ice_write_itr(rc, rc->itr_setting);
8256 rx_q_vector->ch = ch;
8257 /* setup Tx and Rx ITR setting if DIM is off */
8258 rc = &rx_q_vector->rx;
8259 if (!ITR_IS_DYNAMIC(rc))
8260 ice_write_itr(rc, rc->itr_setting);
	/* it is safe to assume that, if the channel has a non-zero num_txq or
	 * num_rxq, then the GLINT_ITR register will have been written to
	 * perform an in-context update, hence perform the flush
	 */
8268 if (ch->num_txq || ch->num_rxq)
8269 ice_flush(&vsi->back->hw);
8273 * ice_cfg_chnl_all_res - configure channel resources
 * @vsi: ptr to main_vsi
8275 * @ch: ptr to channel structure
8277 * This function configures channel specific resources such as flow-director
8278 * counter index, and other resources such as queues, vectors, ITR settings
8281 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8283 /* configure channel (aka ADQ) resources such as queues, vectors,
8284 * ITR settings for channel specific vectors and anything else
8286 ice_chnl_cfg_res(vsi, ch);
8290 * ice_setup_hw_channel - setup new channel
8291 * @pf: ptr to PF device
8292 * @vsi: the VSI being setup
8293 * @ch: ptr to channel structure
8294 * @sw_id: underlying HW switching element ID
8295 * @type: type of channel to be created (VMDq2/VF)
8297 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8298 * and configures Tx rings accordingly
8301 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8302 struct ice_channel *ch, u16 sw_id, u8 type)
8304 struct device *dev = ice_pf_to_dev(pf);
8307 ch->base_q = vsi->next_base_q;
8310 ret = ice_add_channel(pf, sw_id, ch);
8312 dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8316 /* configure/setup ADQ specific resources */
8317 ice_cfg_chnl_all_res(vsi, ch);
8319 /* make sure to update the next_base_q so that subsequent channel's
8320 * (aka ADQ) VSI queue map is correct
8322 vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8323 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8330 * ice_setup_channel - setup new channel using uplink element
8331 * @pf: ptr to PF device
8332 * @vsi: the VSI being setup
8333 * @ch: ptr to channel structure
8335 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8336 * and uplink switching element
8339 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8340 struct ice_channel *ch)
8342 struct device *dev = ice_pf_to_dev(pf);
8346 if (vsi->type != ICE_VSI_PF) {
8347 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8351 sw_id = pf->first_sw->sw_id;
8353 /* create channel (VSI) */
8354 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8356 dev_err(dev, "failed to setup hw_channel\n");
8359 dev_dbg(dev, "successfully created channel()\n");
8361 return ch->ch_vsi ? true : false;
8365 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8366 * @vsi: VSI to be configured
8367 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8368 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8371 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8375 err = ice_set_min_bw_limit(vsi, min_tx_rate);
8379 return ice_set_max_bw_limit(vsi, max_tx_rate);
8383 * ice_create_q_channel - function to create channel
8384 * @vsi: VSI to be configured
8385 * @ch: ptr to channel (it contains channel specific params)
 * This function creates a channel (VSI) using the num_queues specified by the
 * user and reconfigures RSS if needed.
 */
8390 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8392 struct ice_pf *pf = vsi->back;
8398 dev = ice_pf_to_dev(pf);
8399 if (!ch->num_txq || !ch->num_rxq) {
8400 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8404 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8405 dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8406 vsi->cnt_q_avail, ch->num_txq);
8410 if (!ice_setup_channel(pf, vsi, ch)) {
8411 dev_info(dev, "Failed to setup channel\n");
8414 /* configure BW rate limit */
8415 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8418 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8421 dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8422 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8424 dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8425 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8428 vsi->cnt_q_avail -= ch->num_txq;
8434 * ice_rem_all_chnl_fltrs - removes all channel filters
8435 * @pf: ptr to PF, TC-flower based filter are tracked at PF level
8437 * Remove all advanced switch filters only if they are channel specific
 * tc-flower based filters
 */
8440 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8442 struct ice_tc_flower_fltr *fltr;
8443 struct hlist_node *node;
8445 /* to remove all channel filters, iterate an ordered list of filters */
8446 hlist_for_each_entry_safe(fltr, node,
8447 &pf->tc_flower_fltr_list,
8449 struct ice_rule_query_data rule;
8452 /* for now process only channel specific filters */
8453 if (!ice_is_chnl_fltr(fltr))
8456 rule.rid = fltr->rid;
8457 rule.rule_id = fltr->rule_id;
8458 rule.vsi_handle = fltr->dest_vsi_handle;
8459 status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8461 if (status == -ENOENT)
8462 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8465 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8467 } else if (fltr->dest_vsi) {
8468 /* update advanced switch filter count */
8469 if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8470 u32 flags = fltr->flags;
8472 fltr->dest_vsi->num_chnl_fltr--;
8473 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8474 ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8475 pf->num_dmac_chnl_fltrs--;
8479 hlist_del(&fltr->tc_flower_node);
8485 * ice_remove_q_channels - Remove queue channels for the TCs
8486 * @vsi: VSI to be configured
8487 * @rem_fltr: delete advanced switch filter or not
8489 * Remove queue channels for the TCs
8491 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8493 struct ice_channel *ch, *ch_tmp;
8494 struct ice_pf *pf = vsi->back;
	/* remove all tc-flower based filters if they are channel filters only */
	if (rem_fltr)
		ice_rem_all_chnl_fltrs(pf);
8501 /* remove ntuple filters since queue configuration is being changed */
8502 if (vsi->netdev->features & NETIF_F_NTUPLE) {
8503 struct ice_hw *hw = &pf->hw;
8505 mutex_lock(&hw->fdir_fltr_lock);
8506 ice_fdir_del_all_fltrs(vsi);
8507 mutex_unlock(&hw->fdir_fltr_lock);
8510 /* perform cleanup for channels if they exist */
8511 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8512 struct ice_vsi *ch_vsi;
8514 list_del(&ch->list);
8515 ch_vsi = ch->ch_vsi;
8521 /* Reset queue contexts */
8522 for (i = 0; i < ch->num_rxq; i++) {
8523 struct ice_tx_ring *tx_ring;
8524 struct ice_rx_ring *rx_ring;
8526 tx_ring = vsi->tx_rings[ch->base_q + i];
8527 rx_ring = vsi->rx_rings[ch->base_q + i];
8530 if (tx_ring->q_vector)
8531 tx_ring->q_vector->ch = NULL;
8535 if (rx_ring->q_vector)
8536 rx_ring->q_vector->ch = NULL;
8540 /* Release FD resources for the channel VSI */
8541 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8543 /* clear the VSI from scheduler tree */
8544 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8546 /* Delete VSI from FW, PF and HW VSI arrays */
8547 ice_vsi_delete(ch->ch_vsi);
8549 /* free the channel */
8553 /* clear the channel VSI map which is stored in main VSI */
8554 ice_for_each_chnl_tc(i)
8555 vsi->tc_map_vsi[i] = NULL;
	/* reset all of the main VSI's TC information */
 * ice_rebuild_channels - rebuild channels
 * @pf: ptr to PF
 *
 * Recreate channel VSIs and replay filters
 */
8568 static int ice_rebuild_channels(struct ice_pf *pf)
8570 struct device *dev = ice_pf_to_dev(pf);
8571 struct ice_vsi *main_vsi;
8572 bool rem_adv_fltr = true;
8573 struct ice_channel *ch;
8574 struct ice_vsi *vsi;
8578 main_vsi = ice_get_main_vsi(pf);
8582 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8583 main_vsi->old_numtc == 1)
8584 return 0; /* nothing to be done */
	/* reconfigure the main VSI based on the old value of TC and cached
	 * values for MQPRIO opts
	 */
	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8591 dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8592 main_vsi->old_ena_tc, main_vsi->vsi_num);
8596 /* rebuild ADQ VSIs */
8597 ice_for_each_vsi(pf, i) {
8598 enum ice_vsi_type type;
8601 if (!vsi || vsi->type != ICE_VSI_CHNL)
8606 /* rebuild ADQ VSI */
8607 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
8609 dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8610 ice_vsi_type_str(type), vsi->idx, err);
		/* Re-map the HW VSI number, using the VSI handle that is
		 * validated in the ice_replay_vsi() call below
		 */
8617 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8619 /* replay filters for the VSI */
8620 err = ice_replay_vsi(&pf->hw, vsi->idx);
8622 dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8623 ice_vsi_type_str(type), err, vsi->idx);
8624 rem_adv_fltr = false;
8627 dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
8628 ice_vsi_type_str(type), vsi->idx);
8630 /* store ADQ VSI at correct TC index in main VSI's
8633 main_vsi->tc_map_vsi[tc_idx++] = vsi;
	/* ADQ VSI(s) have been rebuilt successfully, so setup the
	 * channels for the main VSI's Tx and Rx rings
	 */
8639 list_for_each_entry(ch, &main_vsi->ch_list, list) {
8640 struct ice_vsi *ch_vsi;
8642 ch_vsi = ch->ch_vsi;
8646 /* reconfig channel resources */
8647 ice_cfg_chnl_all_res(main_vsi, ch);
8649 /* replay BW rate limit if it is non-zero */
8650 if (!ch->max_tx_rate && !ch->min_tx_rate)
8653 err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8656 dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8657 err, ch->max_tx_rate, ch->min_tx_rate,
8660 dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8661 ch->max_tx_rate, ch->min_tx_rate,
8665 /* reconfig RSS for main VSI */
8666 if (main_vsi->ch_rss_size)
8667 ice_vsi_cfg_rss_lut_key(main_vsi);
8672 ice_remove_q_channels(main_vsi, rem_adv_fltr);
8677 * ice_create_q_channels - Add queue channel for the given TCs
8678 * @vsi: VSI to be configured
8680 * Configures queue channel mapping to the given TCs
8682 static int ice_create_q_channels(struct ice_vsi *vsi)
8684 struct ice_pf *pf = vsi->back;
8685 struct ice_channel *ch;
8688 ice_for_each_chnl_tc(i) {
8689 if (!(vsi->all_enatc & BIT(i)))
8692 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8697 INIT_LIST_HEAD(&ch->list);
8698 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8699 ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8700 ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8701 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8702 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8704 /* convert to Kbits/s */
8705 if (ch->max_tx_rate)
8706 ch->max_tx_rate = div_u64(ch->max_tx_rate,
8707 ICE_BW_KBPS_DIVISOR);
8708 if (ch->min_tx_rate)
8709 ch->min_tx_rate = div_u64(ch->min_tx_rate,
8710 ICE_BW_KBPS_DIVISOR);
8712 ret = ice_create_q_channel(vsi, ch);
8714 dev_err(ice_pf_to_dev(pf),
8715 "failed creating channel TC:%d\n", i);
8719 list_add_tail(&ch->list, &vsi->ch_list);
8720 vsi->tc_map_vsi[i] = ch->ch_vsi;
8721 dev_dbg(ice_pf_to_dev(pf),
8722 "successfully created channel: VSI %pK\n", ch->ch_vsi);
8727 ice_remove_q_channels(vsi, false);
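/* The channels created by this function correspond to an ADQ setup made
 * with tc in channel mode, e.g. (illustrative command, PF netdev eth0):
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *		queues 4@0 4@4 hw 1 mode channel \
 *		shaper bw_rlimit max_rate 1Gbit 2Gbit
 */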
8733 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
8734 * @netdev: net device to configure
8735 * @type_data: TC offload data
8737 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
8739 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8740 struct ice_netdev_priv *np = netdev_priv(netdev);
8741 struct ice_vsi *vsi = np->vsi;
8742 struct ice_pf *pf = vsi->back;
8743 u16 mode, ena_tc_qdisc = 0;
8744 int cur_txq, cur_rxq;
8749 dev = ice_pf_to_dev(pf);
8750 num_tcf = mqprio_qopt->qopt.num_tc;
8751 hw = mqprio_qopt->qopt.hw;
8752 mode = mqprio_qopt->mode;
8754 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8755 vsi->ch_rss_size = 0;
8756 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8760 /* Generate queue region map for number of TCF requested */
8761 for (i = 0; i < num_tcf; i++)
8762 ena_tc_qdisc |= BIT(i);
8765 case TC_MQPRIO_MODE_CHANNEL:
8767 if (pf->hw.port_info->is_custom_tx_enabled) {
8768 dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
8771 ice_tear_down_devlink_rate_tree(pf);
8773 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8775 netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
8779 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8780 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
		/* don't assume the state of hw_tc_offload during driver load
		 * and set the flag for TC flower filter if hw_tc_offload is
		 * already ON
		 */
		if (vsi->netdev->features & NETIF_F_HW_TC)
8786 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8794 /* Requesting same TCF configuration as already enabled */
8795 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8796 mode != TC_MQPRIO_MODE_CHANNEL)
8799 /* Pause VSI queues */
8800 ice_dis_vsi(vsi, true);
8802 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
8803 ice_remove_q_channels(vsi, true);
8805 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8806 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
8808 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
		/* logic to rebuild the VSI, same as for ethtool -L */
8812 u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
8814 for (i = 0; i < num_tcf; i++) {
8815 if (!(ena_tc_qdisc & BIT(i)))
8818 offset = vsi->mqprio_qopt.qopt.offset[i];
8819 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
8820 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
8822 vsi->req_txq = offset + qcount_tx;
8823 vsi->req_rxq = offset + qcount_rx;
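		/* Illustrative example of the computation above: with
		 * num_tcf = 2, offset[] = {0, 8} and count[] = {8, 4}, the
		 * last enabled TC leaves offset = 8 and qcount_tx/qcount_rx
		 * = 4, so the VSI requests 8 + 4 = 12 Tx and 12 Rx queues.
		 */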
		/* store away the original rss_size info, so that it gets
		 * reused from ice_vsi_rebuild during the tc-qdisc delete
		 * stage - to determine what the rss_size should be for the
		 * main VSI
		 */
		vsi->orig_rss_size = vsi->rss_size;
8832 /* save current values of Tx and Rx queues before calling VSI rebuild
8833 * for fallback option
8835 cur_txq = vsi->num_txq;
8836 cur_rxq = vsi->num_rxq;
	/* proceed with rebuilding the main VSI using the correct number of queues */
8839 ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
8841 /* fallback to current number of queues */
8842 dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
8843 vsi->req_txq = cur_txq;
8844 vsi->req_rxq = cur_rxq;
8845 clear_bit(ICE_RESET_FAILED, pf->state);
8846 if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
8847 dev_err(dev, "Rebuild of main VSI failed again\n");
8852 vsi->all_numtc = num_tcf;
8853 vsi->all_enatc = ena_tc_qdisc;
8854 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
8856 netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
8861 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8862 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8863 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
8865 /* set TC0 rate limit if specified */
8866 if (max_tx_rate || min_tx_rate) {
8867 /* convert to Kbits/s */
8869 max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
8871 min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
8873 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
8875 dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
8876 max_tx_rate, min_tx_rate, vsi->vsi_num);
8878 dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
8879 max_tx_rate, min_tx_rate, vsi->vsi_num);
8883 ret = ice_create_q_channels(vsi);
8885 netdev_err(netdev, "failed configuring queue channels\n");
8888 netdev_dbg(netdev, "successfully configured channels\n");
8892 if (vsi->ch_rss_size)
8893 ice_vsi_cfg_rss_lut_key(vsi);
8896 /* if error, reset the all_numtc and all_enatc */
8902 ice_ena_vsi(vsi, true);
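
/* Usage sketch (illustrative, not part of the driver): channel-mode mqprio,
 * as handled above, is typically requested from user space with iproute2,
 * e.g.:
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel
 *
 * "eth0" and the queue layout here are placeholders; the exact option
 * syntax depends on the iproute2 version.
 */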

static LIST_HEAD(ice_block_cb_list);

static int
ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
	     void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	bool locked = false;
	int err;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &ice_block_cb_list,
						  ice_setup_tc_block_cb,
						  np, np, true);
	case TC_SETUP_QDISC_MQPRIO:
		if (ice_is_eswitch_mode_switchdev(pf)) {
			netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
			return -EOPNOTSUPP;
		}

		if (pf->adev) {
			mutex_lock(&pf->adev_mutex);
			device_lock(&pf->adev->dev);
			locked = true;
			if (pf->adev->dev.driver) {
				netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
				err = -EBUSY;
				goto adev_unlock;
			}
		}

		/* setup traffic classifier for receive side */
		mutex_lock(&pf->tc_mutex);
		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
		mutex_unlock(&pf->tc_mutex);

adev_unlock:
		if (locked) {
			device_unlock(&pf->adev->dev);
			mutex_unlock(&pf->adev_mutex);
		}
		return err;
	default:
		return -EOPNOTSUPP;
	}
	return -EOPNOTSUPP;
}
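
/**
 * ice_indr_block_priv_lookup - find indirect block private data for a netdev
 * @np: private data of the port netdev that owns the indirect block list
 * @netdev: upper device (tunnel or VLAN) being looked up
 *
 * Walk the per-port list of indirect TC block bindings and return the entry
 * registered for @netdev, or NULL if the device has no binding yet.
 */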
static struct ice_indr_block_priv *
ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
			   struct net_device *netdev)
{
	struct ice_indr_block_priv *cb_priv;

	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
		if (!cb_priv->netdev)
			return NULL;
		if (cb_priv->netdev == netdev)
			return cb_priv;
	}
	return NULL;
}
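
/**
 * ice_indr_setup_block_cb - indirect TC block offload callback
 * @type: TC setup type being offloaded
 * @type_data: setup-type specific offload request
 * @indr_priv: binding allocated at FLOW_BLOCK_BIND time
 *
 * Dispatch offload requests arriving on an indirectly bound block; only
 * flower classifier rules (TC_SETUP_CLSFLOWER) are supported.
 */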
static int
ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
			void *indr_priv)
{
	struct ice_indr_block_priv *priv = indr_priv;
	struct ice_netdev_priv *np = priv->np;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_setup_tc_cls_flower(np, priv->netdev,
					       (struct flow_cls_offload *)
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int
ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
			struct ice_netdev_priv *np,
			struct flow_block_offload *f, void *data,
			void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct ice_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (!ice_is_tunnel_supported(netdev) &&
	    !(is_vlan_dev(netdev) &&
	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->np = np;
		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);

		block_cb =
			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
						 indr_priv, indr_priv,
						 ice_rep_indr_tc_block_unbind,
						 f, netdev, sch, data, np,
						 cleanup);

		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
		break;
	case FLOW_BLOCK_UNBIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						ice_indr_setup_block_cb,
						indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);

		list_del(&block_cb->driver_list);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb))
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
					       data, cleanup);
	default:
		return -EOPNOTSUPP;
	}
}
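
/* Note: ice_indr_setup_tc_cb() is the driver's indirect-block entry point;
 * it is registered with the flow offload core elsewhere in the driver
 * (via flow_indr_dev_register()) so that TC rules installed on tunnel or
 * VLAN upper devices can be offloaded to this PF.
 */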

/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't open net device while reset is in progress\n");
		return -EBUSY;
	}

	return ice_open_internal(netdev);
}

/**
 * ice_open_internal - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * Internal ice_open implementation. Should not be called directly except by
 * ice_open and the reset handling routine.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open_internal(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_port_info *pi;
	int err;

	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	netif_carrier_off(netdev);

	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err) {
		netdev_err(netdev, "Failed to get link info, error %d\n", err);
		return err;
	}

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Set PHY if there is media, otherwise, turn off PHY */
	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
			err = ice_init_phy_user_cfg(pi);
			if (err) {
				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
					   err);
				return err;
			}
		}

		err = ice_configure_phy(vsi);
		if (err) {
			netdev_err(netdev, "Failed to set physical link up, error %d\n",
				   err);
			return err;
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	/* Update existing tunnels information */
	udp_tunnel_get_rx_info(netdev);

	return err;
}

/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't stop net device while reset is in progress\n");
		return -EBUSY;
	}

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
		int link_err = ice_force_phys_link_state(vsi, false);

		if (link_err) {
			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
				   vsi->vsi_num, link_err);
			ice_vsi_close(vsi);
			return -EIO;
		}
	}

	ice_vsi_close(vsi);

	return 0;
}
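
/* ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA is controlled by the "link-down-on-close"
 * ethtool private flag; when it is set, ice_stop() also forces the physical
 * link down instead of only closing the VSI.
 */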

/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	bool gso = skb_is_gso(skb);
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
		features &= ~NETIF_F_GSO_MASK;

	len = skb_network_offset(skb);
	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
		goto out_rm_features;

	len = skb_network_header_len(skb);
	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
		goto out_rm_features;

	if (skb->encapsulation) {
		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
		 * the case of IPIP frames, the transport header pointer is
		 * after the inner header! So check to make sure that this
		 * is a GRE or UDP_TUNNEL frame before doing that math.
		 */
		if (gso && (skb_shinfo(skb)->gso_type &
			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
			len = skb_inner_network_header(skb) -
			      skb_transport_header(skb);
			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
				goto out_rm_features;
		}

		len = skb_inner_network_header_len(skb);
		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
			goto out_rm_features;
	}

	return features;
out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
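
/* Safe Mode netdev ops: used when the driver runs without a DDP package
 * (e.g. the package failed to load), leaving only basic Tx/Rx paths and
 * housekeeping callbacks available.
 */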
static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};

static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_select_queue = ice_select_queue,
	.ndo_features_check = ice_features_check,
	.ndo_fix_features = ice_fix_features,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
	.ndo_eth_ioctl = ice_eth_ioctl,
	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
	.ndo_set_vf_mac = ice_set_vf_mac,
	.ndo_get_vf_config = ice_get_vf_cfg,
	.ndo_set_vf_trust = ice_set_vf_trust,
	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
	.ndo_set_vf_link_state = ice_set_vf_link_state,
	.ndo_get_vf_stats = ice_get_vf_stats,
	.ndo_set_vf_rate = ice_set_vf_bw,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_setup_tc = ice_setup_tc,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ice_rx_flow_steer,
#endif
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp,
	.ndo_xdp_xmit = ice_xdp_xmit,
	.ndo_xsk_wakeup = ice_xsk_wakeup,