// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <linux/module.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "i40e.h"
#include "i40e_devids.h"
#include "i40e_diag.h"
#include "i40e_lan_hmc.h"
#include "i40e_virtchnl_pf.h"

/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";

/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf);
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
static bool i40e_check_recovery_mode(struct i40e_pf *pf);
static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);
static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_BC), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");

static struct workqueue_struct *i40e_wq;

static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
				  struct net_device *netdev, int delta)
{
	struct netdev_hw_addr_list *ha_list;
	struct netdev_hw_addr *ha;

	if (!f || !netdev)
		return;

	if (is_unicast_ether_addr(f->macaddr) || is_link_local_ether_addr(f->macaddr))
		ha_list = &netdev->uc;
	else
		ha_list = &netdev->mc;

	netdev_hw_addr_list_for_each(ha, ha_list) {
		if (ether_addr_equal(ha->addr, f->macaddr)) {
			ha->refcount += delta;
			if (ha->refcount <= 0)
				ha->refcount = 1;
			break;
		}
	}
}

/**
 * i40e_hw_to_dev - get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 **/
struct device *i40e_hw_to_dev(struct i40e_hw *hw)
{
	struct i40e_pf *pf = i40e_hw_to_pf(hw);

	return &pf->pdev->dev;
}

/**
 * i40e_allocate_dma_mem - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			  u64 size, u32 alignment)
{
	struct i40e_pf *pf = i40e_hw_to_pf(hw);

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = i40e_hw_to_pf(hw);

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}
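
/* A minimal usage sketch (illustrative only, not part of the driver) for
 * the two DMA helpers above: shared code allocates a coherent buffer for
 * things like admin queue rings, then releases it. The size/alignment
 * values and the function name here are hypothetical.
 */
static inline int i40e_dma_mem_example(struct i40e_hw *hw)
{
	struct i40e_dma_mem mem = {};
	int err;

	/* one 4 KiB buffer, 4 KiB aligned; fills mem.va (CPU) and mem.pa (device) */
	err = i40e_allocate_dma_mem(hw, &mem, 4096, 4096);
	if (err)
		return err;

	/* ... program mem.pa into the device, touch mem.va from the CPU ... */

	return i40e_free_dma_mem(hw, &mem);
}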

/**
 * i40e_allocate_virt_mem - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			   u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%s needed=%d id=0x%04x\n",
			 pile ? "<valid>" : "<null>", needed, id);
		return -EINVAL;
	}

	/* Allocate last queue in the pile for FDIR VSI queue
	 * so it doesn't fragment the qp_pile
	 */
	if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
		if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
			dev_err(&pf->pdev->dev,
				"Cannot allocate queue %d for I40E_VSI_FDIR\n",
				pile->num_entries - 1);
			return -ENOMEM;
		}
		pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
		return pile->num_entries - 1;
	}

	i = 0;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	u16 i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	return count;
}
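
/* Illustrative sketch (not driver code) of the pile encoding used by
 * i40e_get_lump()/i40e_put_lump() above: an allocated entry stores the
 * owner id with I40E_PILE_VALID_BIT set, so a free entry is one without
 * the valid bit. The owner id 5 below is hypothetical.
 */
static inline void i40e_lump_usage_example(struct i40e_pf *pf)
{
	struct i40e_lump_tracking *pile = pf->irq_pile;
	int base;

	base = i40e_get_lump(pf, pile, 3, 5);
	if (base >= 0) {
		/* entries base..base+2 now hold (5 | I40E_PILE_VALID_BIT) */
		i40e_put_lump(pile, base, 5);	/* returns 3, entries zeroed */
	}
}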

/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if ((!test_bit(__I40E_DOWN, pf->state) &&
	     !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
	      test_bit(__I40E_RECOVERY_MODE, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number timing out
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i;
	u32 head, val;

	pf->tx_timeout_count++;

	/* with txqueue index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (txqueue ==
			    vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	/* don't kick off another recovery if one is already pending */
	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
		return;

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
		    pf->tx_timeout_recovery_level, txqueue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
		set_bit(__I40E_DOWN_REQUESTED, pf->state);
		set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		packets = ring->stats.packets;
		bytes   = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes   += bytes;
}
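
/* Writer side of the u64_stats pattern consumed above, shown only to
 * illustrate how the fetch_begin/fetch_retry loop pairs with
 * update_begin/update_end (the real updates live in the Tx/Rx hot path,
 * not here; this helper is a hypothetical sketch).
 */
static inline void i40e_ring_stats_writer_example(struct i40e_ring *ring,
						  u64 packets, u64 bytes)
{
	u64_stats_update_begin(&ring->syncp);
	ring->stats.packets += packets;
	ring->stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);
}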

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	struct i40e_ring *ring;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		ring = READ_ONCE(vsi->tx_rings[i]);
		if (!ring)
			continue;
		i40e_get_netdev_stats_struct_tx(ring, stats);

		if (i40e_enabled_xdp_vsi(vsi)) {
			ring = READ_ONCE(vsi->xdp_rings[i]);
			if (!ring)
				continue;
			i40e_get_netdev_stats_struct_tx(ring, stats);
		}

		ring = READ_ONCE(vsi->rx_rings[i]);
		if (!ring)
			continue;
		do {
			start   = u64_stats_fetch_begin(&ring->syncp);
			packets = ring->stats.packets;
			bytes   = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_missed_errors	= vsi_stats->rx_missed_errors;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			memset(&pf->veb[i]->tc_stats, 0,
			       sizeof(pf->veb[i]->tc_stats));
			memset(&pf->veb[i]->tc_stats_offsets, 0,
			       sizeof(pf->veb[i]->tc_stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}

/**
 * i40e_compute_pci_to_hw_id - compute index from PCI function.
 * @vsi: ptr to the VSI to read from.
 * @hw: ptr to the hardware info.
 **/
static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw)
{
	int pf_count = i40e_get_pf_count(hw);

	if (vsi->type == I40E_VSI_SRIOV)
		return (hw->port * BIT(7)) / pf_count + vsi->vf_id;

	return hw->port + BIT(7);
}
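
/* Worked example (illustrative only) of the mapping above: with BIT(7) =
 * 128 hardware ids split across the PFs, port 1 on a hypothetical 4-PF
 * device gives SR-IOV VSIs a base of (1 * 128) / 4 = 32, so vf_id 3 maps
 * to 35, while the PF itself uses port + 128 = 129.
 */
static inline u32 i40e_pci_to_hw_id_example(void)
{
	u32 port = 1, pf_count = 4, vf_id = 3;

	return (port * BIT(7)) / pf_count + vf_id;	/* 32 + 3 = 35 */
}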

/**
 * i40e_stat_update64 - read and update a 64 bit stat from the chip.
 * @hw: ptr to the hardware info.
 * @hireg: the high 32 bit reg to read.
 * @loreg: the low 32 bit reg to read.
 * @offset_loaded: has the initial offset been loaded yet.
 * @offset: ptr to current offset value.
 * @stat: ptr to the stat.
 *
 * Since the device stats are not reset at PFReset, they will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.
 **/
static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	new_data = rd64(hw, loreg);

	if (!offset_loaded || new_data < *offset)
		*offset = new_data;
	*stat = new_data - *offset;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero. In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
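
/* Worked example (illustrative, not driver code) of the 48-bit roll-over
 * handling above: when the raw counter wraps past 2^48, adding BIT_ULL(48)
 * before subtracting the saved offset recovers the true delta, and the
 * final mask keeps the result inside 48 bits.
 */
static inline u64 i40e_stat48_delta_example(void)
{
	u64 offset = 0xFFFFFFFFFFF0ULL;	/* first value ever read */
	u64 new_data = 0x10ULL;		/* counter has since wrapped */

	/* (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20 events since load */
	return ((new_data + BIT_ULL(48)) - offset) & 0xFFFFFFFFFFFFULL;
}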

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);

	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}

/**
 * i40e_stats_update_rx_discards - update rx_discards.
 * @vsi: ptr to the VSI to be updated.
 * @hw: ptr to the hardware info.
 * @stat_idx: VSI's stat_counter_idx.
 * @offset_loaded: ptr to the VSI's stat_offsets_loaded.
 * @stat_offset: ptr to stat_offset to store first read of specific register.
 * @stat: ptr to VSI's stat to be updated.
 **/
static void
i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
			      int stat_idx, bool offset_loaded,
			      struct i40e_eth_stats *stat_offset,
			      struct i40e_eth_stats *stat)
{
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
			   &stat_offset->rx_discards, &stat->rx_discards);
	i40e_stat_update64(hw,
			   I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
			   I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
			   offset_loaded, &stat_offset->rx_discards_other,
			   &stat->rx_discards_other);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);

	i40e_stats_update_rx_discards(vsi, hw, stat_idx,
				      vsi->stat_offsets_loaded, oes, es);

	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs. This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications. We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u64 tx_restart, tx_busy;
	struct i40e_ring *p;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_stopped;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	tx_stopped = 0;
	rx_page = 0;
	rx_buf = 0;
	rx_reuse = 0;
	rx_alloc = 0;
	rx_waive = 0;
	rx_busy = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = READ_ONCE(vsi->tx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;
		tx_stopped += p->tx_stats.tx_stopped;

		/* locate Rx ring */
		p = READ_ONCE(vsi->rx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
		rx_reuse += p->rx_stats.page_reuse_count;
		rx_alloc += p->rx_stats.page_alloc_count;
		rx_waive += p->rx_stats.page_waive_count;
		rx_busy += p->rx_stats.page_busy_count;

		if (i40e_enabled_xdp_vsi(vsi)) {
			/* locate XDP ring */
			p = READ_ONCE(vsi->xdp_rings[q]);
			if (!p)
				continue;

			do {
				start = u64_stats_fetch_begin(&p->syncp);
				packets = p->stats.packets;
				bytes = p->stats.bytes;
			} while (u64_stats_fetch_retry(&p->syncp, start));
			tx_b += bytes;
			tx_p += packets;
			tx_restart += p->tx_stats.restart_queue;
			tx_busy += p->tx_stats.tx_busy;
			tx_linearize += p->tx_stats.tx_linearize;
			tx_force_wb += p->tx_stats.tx_force_wb;
		}
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->tx_stopped = tx_stopped;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;
	vsi->rx_page_reuse = rx_reuse;
	vsi->rx_page_alloc = rx_alloc;
	vsi->rx_page_waive = rx_waive;
	vsi->rx_page_busy = rx_busy;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards_other;
	ns->rx_dropped = es->rx_discards_other;
	ons->rx_missed_errors = oes->rx_discards;
	ns->rx_missed_errors = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

1098 for (i = 0; i < 8; i++) {
1099 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
1100 pf->stat_offsets_loaded,
1101 &osd->priority_xoff_rx[i],
1102 &nsd->priority_xoff_rx[i]);
1103 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1104 pf->stat_offsets_loaded,
1105 &osd->priority_xon_rx[i],
1106 &nsd->priority_xon_rx[i]);
1107 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1108 pf->stat_offsets_loaded,
1109 &osd->priority_xon_tx[i],
1110 &nsd->priority_xon_tx[i]);
1111 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1112 pf->stat_offsets_loaded,
1113 &osd->priority_xoff_tx[i],
1114 &nsd->priority_xoff_tx[i]);
1115 i40e_stat_update32(hw,
1116 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1117 pf->stat_offsets_loaded,
1118 &osd->priority_xon_2_xoff[i],
1119 &nsd->priority_xon_2_xoff[i]);
	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
			&nsd->fd_sb_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		FIELD_GET(I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK, val);
	nsd->rx_lpi_status =
		FIELD_GET(I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK, val);
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
	    !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
	    !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}

/**
 * i40e_count_filters - counts VSI mac filters
 * @vsi: the VSI to be searched
 *
 * Returns count of mac filters
 **/
int i40e_count_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;
	int cnt = 0;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		++cnt;

	return cnt;
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ALL. We could check the table each
	 * time, incurring search cost repeatedly. However, we can notice two
	 * things:
	 *
	 * 1) the only place where we can gain a VLAN filter is in
	 *    i40e_add_filter.
	 *
	 * 2) the only place where filters are actually removed is in
	 *    i40e_sync_filters_subtask.
	 *
	 * Thus, we can simply use a boolean value, has_vlan_filters which we
	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
	 * we have to perform the full search after deleting filters in
	 * i40e_sync_filters_subtask, but we already have to search
	 * filters here and can perform the check at the same time. This
	 * results in avoiding embedding a loop for VLAN mode inside another
	 * loop over all the filters, and should maintain correctness as noted
	 * above.
	 */
	return vsi->has_vlan_filter;
}

/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 */
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
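
/* Condensed sketch (not driver code) of the three correction rules the
 * function above applies to one filter's vlan field; pvid and vlan_filters
 * have the same meaning as in i40e_correct_mac_vlan_filters().
 */
static inline s16 i40e_corrected_vlan_example(s16 vlan, s16 pvid,
					      int vlan_filters)
{
	if (pvid)
		return pvid;		/* rule a: PVID wins */
	if (vlan_filters && vlan == I40E_VLAN_ANY)
		return 0;		/* rule b: untagged only */
	if (!vlan_filters && vlan == 0)
		return I40E_VLAN_ANY;	/* rule c: tagged and untagged */
	return vlan;			/* filter already correct */
}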

/**
 * i40e_get_vf_new_vlan - Get new vlan id on a vf
 * @vsi: the vsi to configure
 * @new_mac: new mac filter to be added
 * @f: existing mac filter, replaced with new_mac->f if new_mac is not NULL
 * @vlan_filters: the number of active VLAN filters
 * @trusted: flag if the VF is trusted
 *
 * Get new VLAN id based on current VLAN filters, trust, PVID
 * and vf-vlan-prune-disable flag.
 *
 * Returns the value of the new vlan filter or
 * the old value if no new filter is needed.
 */
static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi,
				struct i40e_new_mac_filter *new_mac,
				struct i40e_mac_filter *f,
				int vlan_filters,
				bool trusted)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_pf *pf = vsi->back;
	bool is_any;

	if (new_mac)
		f = new_mac->f;

	if (pvid && f->vlan != pvid)
		return pvid;

	is_any = (trusted ||
		  !test_bit(I40E_FLAG_VF_VLAN_PRUNING_ENA, pf->flags));

	if ((vlan_filters && f->vlan == I40E_VLAN_ANY) ||
	    (!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) ||
	    (is_any && !vlan_filters && f->vlan == 0)) {
		if (is_any)
			return I40E_VLAN_ANY;
		else
			return 0;
	}

	return f->vlan;
}

/**
 * i40e_correct_vf_mac_vlan_filters - Correct non-VLAN VF filters if necessary
 * @vsi: the vsi to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 * @trusted: flag if the VF is trusted
 *
 * Correct VF VLAN filters based on current VLAN filters, trust, PVID
 * and vf-vlan-prune-disable flag.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 */
static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi,
					    struct hlist_head *tmp_add_list,
					    struct hlist_head *tmp_del_list,
					    int vlan_filters,
					    bool trusted)
{
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new_mac;
	struct hlist_node *h;
	int bkt, new_vlan;

	hlist_for_each_entry(new_mac, tmp_add_list, hlist) {
		new_mac->f->vlan = i40e_get_vf_new_vlan(vsi, new_mac, NULL,
							vlan_filters, trusted);
	}

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		new_vlan = i40e_get_vf_new_vlan(vsi, NULL, f, vlan_filters,
						trusted);
		if (new_vlan != f->vlan) {
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new_mac = kzalloc(sizeof(*new_mac), GFP_ATOMIC);
			if (!new_mac)
				return -ENOMEM;

			new_mac->f = add_head;
			new_mac->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new_mac->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}

/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}

/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}

/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}

/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	lockdep_assert_held(&vsi->mac_filter_hash_lock);
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* Copy the address first, so that we avoid a possible race with
	 * .set_rx_mode().
	 * - Remove old address from MAC filter
	 * - Copy new address
	 * - Add new address to MAC filter
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	eth_hw_addr_set(netdev, addr->sa_data);
	i40e_add_mac_filter(vsi, netdev->dev_addr);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (vsi->type == I40E_VSI_MAIN) {
		int ret;

		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n",
				    ERR_PTR(ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(pf);
	return 0;
}

/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table of lut_size
 * @lut_size: size of the lookup table
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %pe aq_err %s\n",
				 ERR_PTR(ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %pe aq_err %s\n",
				 ERR_PTR(ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}

/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	if (!test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
		return 0;
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use the user configured hash keys and lookup table if there is one,
	 * otherwise use default
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);
	return ret;
}
1925 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
1926 * @vsi: the VSI being configured,
1927 * @ctxt: VSI context structure
1928 * @enabled_tc: number of traffic classes to enable
1930 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
1932 static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
1933 struct i40e_vsi_context *ctxt,
1936 u16 qcount = 0, max_qcount, qmap, sections = 0;
1937 int i, override_q, pow, num_qps, ret;
1938 u8 netdev_tc = 0, offset = 0;
1940 if (vsi->type != I40E_VSI_MAIN)
1942 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1943 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1944 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
1945 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1946 num_qps = vsi->mqprio_qopt.qopt.count[0];
1948 /* find the next higher power-of-2 of num queue pairs */
1949 pow = ilog2(num_qps);
1950 if (!is_power_of_2(num_qps))
1952 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1953 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1955 /* Setup queue offset/count for all TCs for given VSI */
1956 max_qcount = vsi->mqprio_qopt.qopt.count[0];
1957 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1958 /* See if the given TC is enabled for the given VSI */
1959 if (vsi->tc_config.enabled_tc & BIT(i)) {
1960 offset = vsi->mqprio_qopt.qopt.offset[i];
1961 qcount = vsi->mqprio_qopt.qopt.count[i];
1962 if (qcount > max_qcount)
1963 max_qcount = qcount;
1964 vsi->tc_config.tc_info[i].qoffset = offset;
1965 vsi->tc_config.tc_info[i].qcount = qcount;
1966 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1967 } else {
1968 /* TC is not enabled so set the offset to
1969 * default queue and allocate one queue
1970 * for the given TC.
1971 */
1972 vsi->tc_config.tc_info[i].qoffset = 0;
1973 vsi->tc_config.tc_info[i].qcount = 1;
1974 vsi->tc_config.tc_info[i].netdev_tc = 0;
1978 /* Set actual Tx/Rx queue pairs */
1979 vsi->num_queue_pairs = offset + qcount;
1981 /* Setup queue TC[0].qmap for given VSI context */
1982 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1983 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1984 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1985 ctxt->info.valid_sections |= cpu_to_le16(sections);
1987 /* Reconfigure RSS for main VSI with max queue count */
1988 vsi->rss_size = max_qcount;
1989 ret = i40e_vsi_config_rss(vsi);
1990 if (ret) {
1991 dev_info(&vsi->back->pdev->dev,
1992 "Failed to reconfig rss for num_queues (%u)\n",
1993 max_qcount);
1994 return ret;
1996 vsi->reconfig_rss = true;
1997 dev_dbg(&vsi->back->pdev->dev,
1998 "Reconfigured rss with num_queues (%u)\n", max_qcount);
2000 /* Find queue count available for channel VSIs and starting offset
2001 * for them
2002 */
2003 override_q = vsi->mqprio_qopt.qopt.count[0];
2004 if (override_q && override_q < vsi->num_queue_pairs) {
2005 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
2006 vsi->next_base_queue = override_q;
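/* Worked example (illustrative): with mqprio count[0] = 4 and
 * vsi->num_queue_pairs = 16, the main VSI keeps queues 0..3 and the
 * channel VSIs may carve from the remainder:
 *
 *	vsi->cnt_q_avail     = 16 - 4;	// 12 queues left
 *	vsi->next_base_queue = 4;	// first queue for channels
 */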
2012 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
2013 * @vsi: the VSI being setup
2014 * @ctxt: VSI context structure
2015 * @enabled_tc: Enabled TCs bitmap
2016 * @is_add: True if called before Add VSI
2018 * Setup VSI queue mapping for enabled traffic classes.
2020 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
2021 struct i40e_vsi_context *ctxt,
2022 u8 enabled_tc,
2023 bool is_add)
2025 struct i40e_pf *pf = vsi->back;
2035 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2037 /* zero out queue mapping; it will be updated at the end of this function */
2038 memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));
2040 if (vsi->type == I40E_VSI_MAIN) {
2041 /* This code helps add more queues to the VSI if we have
2042 * more cores than RSS can support, the higher cores will
2043 * be served by ATR or other filters. Furthermore, the
2044 * non-zero req_queue_pairs says that user requested a new
2045 * queue count via ethtool's set_channels, so use this
2046 * value for queues distribution across traffic classes.
2047 * We need at least one queue pair for the interface
2048 * to be usable, as handled by the else branch below.
2049 */
2050 if (vsi->req_queue_pairs > 0)
2051 vsi->num_queue_pairs = vsi->req_queue_pairs;
2052 else if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
2053 vsi->num_queue_pairs = pf->num_lan_msix;
2054 else
2055 vsi->num_queue_pairs = 1;
2058 /* Number of queues per enabled TC */
2059 if (vsi->type == I40E_VSI_MAIN ||
2060 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
2061 num_tc_qps = vsi->num_queue_pairs;
2063 num_tc_qps = vsi->alloc_queue_pairs;
2065 if (enabled_tc && test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) {
2066 /* Find numtc from enabled TC bitmap */
2067 for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2068 if (enabled_tc & BIT(i)) /* TC is enabled */
2069 numtc++;
2071 if (!numtc) {
2072 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
2073 numtc = 1;
2075 num_tc_qps = num_tc_qps / numtc;
2076 num_tc_qps = min_t(int, num_tc_qps,
2077 i40e_pf_get_max_q_per_tc(pf));
2080 vsi->tc_config.numtc = numtc;
2081 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
2083 /* Do not allow more TC queue pairs than MSI-X vectors exist */
2084 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
2085 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
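/* Worked example (illustrative): with 16 queue pairs, numtc = 4 enabled
 * TCs and 8 LAN MSI-X vectors, each TC ends up with
 *
 *	num_tc_qps = 16 / 4;			// 4 per TC
 *	num_tc_qps = min(4, pf->num_lan_msix);	// still 4
 */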
2087 /* Setup queue offset/count for all TCs for given VSI */
2088 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2089 /* See if the given TC is enabled for the given VSI */
2090 if (vsi->tc_config.enabled_tc & BIT(i)) {
2094 switch (vsi->type) {
2095 case I40E_VSI_MAIN:
2096 if ((!test_bit(I40E_FLAG_FD_SB_ENA,
2097 pf->flags) &&
2098 !test_bit(I40E_FLAG_FD_ATR_ENA,
2099 pf->flags)) ||
2100 vsi->tc_config.enabled_tc != 1) {
2101 qcount = min_t(int, pf->alloc_rss_size,
2102 num_tc_qps);
2103 break;
2105 fallthrough;
2106 case I40E_VSI_FDIR:
2107 case I40E_VSI_SRIOV:
2108 case I40E_VSI_VMDQ2:
2109 default:
2110 qcount = num_tc_qps;
2112 break;
2114 vsi->tc_config.tc_info[i].qoffset = offset;
2115 vsi->tc_config.tc_info[i].qcount = qcount;
2117 /* find the next higher power-of-2 of num queue pairs */
2118 num_qps = qcount;
2119 pow = 0;
2120 while (num_qps && (BIT_ULL(pow) < qcount)) {
2121 pow++;
2122 num_qps >>= 1;
2125 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
2126 qmap =
2127 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2128 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
2129 offset += qcount;
2131 } else {
2132 /* TC is not enabled so set the offset to
2133 * default queue and allocate one queue
2134 * for the given TC.
2135 */
2136 vsi->tc_config.tc_info[i].qoffset = 0;
2137 vsi->tc_config.tc_info[i].qcount = 1;
2138 vsi->tc_config.tc_info[i].netdev_tc = 0;
2142 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
2144 /* Do not change previously set num_queue_pairs for PFs and VFs */
2145 if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
2146 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
2147 (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
2148 vsi->num_queue_pairs = offset;
2150 /* Scheduler section valid can only be set for ADD VSI */
2151 if (is_add) {
2152 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
2154 ctxt->info.up_enable_bits = enabled_tc;
2156 if (vsi->type == I40E_VSI_SRIOV) {
2157 ctxt->info.mapping_flags |=
2158 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2159 for (i = 0; i < vsi->num_queue_pairs; i++)
2160 ctxt->info.queue_mapping[i] =
2161 cpu_to_le16(vsi->base_queue + i);
2163 ctxt->info.mapping_flags |=
2164 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2165 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
2167 ctxt->info.valid_sections |= cpu_to_le16(sections);
2171 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
2172 * @netdev: the netdevice
2173 * @addr: address to add
2175 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
2176 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
2178 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
2180 struct i40e_netdev_priv *np = netdev_priv(netdev);
2181 struct i40e_vsi *vsi = np->vsi;
2183 if (i40e_add_mac_filter(vsi, addr))
2184 return 0;
2185 else
2186 return -ENOMEM;
2190 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
2191 * @netdev: the netdevice
2192 * @addr: address to remove
2194 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
2195 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
2197 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
2199 struct i40e_netdev_priv *np = netdev_priv(netdev);
2200 struct i40e_vsi *vsi = np->vsi;
2202 /* Under some circumstances, we might receive a request to delete
2203 * our own device address from our uc list. Because we store the
2204 * device address in the VSI's MAC/VLAN filter list, we need to ignore
2205 * such requests and not delete our device address from this list.
2206 */
2207 if (ether_addr_equal(addr, netdev->dev_addr))
2208 return 0;
2210 i40e_del_mac_filter(vsi, addr);
2212 return 0;
2216 * i40e_set_rx_mode - NDO callback to set the netdev filters
2217 * @netdev: network interface device structure
2219 static void i40e_set_rx_mode(struct net_device *netdev)
2221 struct i40e_netdev_priv *np = netdev_priv(netdev);
2222 struct i40e_vsi *vsi = np->vsi;
2224 spin_lock_bh(&vsi->mac_filter_hash_lock);
2226 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2227 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2229 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2231 /* check for other flag changes */
2232 if (vsi->current_netdev_flags != vsi->netdev->flags) {
2233 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2234 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
2239 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
2240 * @vsi: Pointer to VSI struct
2241 * @from: Pointer to list which contains MAC filter entries - changes to
2242 * those entries needs to be undone.
2244 * MAC filter entries from this list were slated for deletion.
2246 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
2247 struct hlist_head *from)
2249 struct i40e_mac_filter *f;
2250 struct hlist_node *h;
2252 hlist_for_each_entry_safe(f, h, from, hlist) {
2253 u64 key = i40e_addr_to_hkey(f->macaddr);
2255 /* Move the element back into MAC filter list*/
2256 hlist_del(&f->hlist);
2257 hash_add(vsi->mac_filter_hash, &f->hlist, key);
2262 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
2263 * @vsi: Pointer to vsi struct
2264 * @from: Pointer to list which contains MAC filter entries - changes to
2265 * those entries needs to be undone.
2267 * MAC filter entries from this list were slated for addition.
2269 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
2270 struct hlist_head *from)
2272 struct i40e_new_mac_filter *new;
2273 struct hlist_node *h;
2275 hlist_for_each_entry_safe(new, h, from, hlist) {
2276 /* We can simply free the wrapper structure */
2277 hlist_del(&new->hlist);
2278 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2279 kfree(new);
2284 * i40e_next_filter - Get the next non-broadcast filter from a list
2285 * @next: pointer to filter in list
2287 * Returns the next non-broadcast filter in the list. Required so that we
2288 * ignore broadcast filters within the list, since these are not handled via
2289 * the normal firmware update path.
2291 static
2292 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2294 hlist_for_each_entry_continue(next, hlist) {
2295 if (!is_broadcast_ether_addr(next->f->macaddr))
2296 return next;
2299 return NULL;
2303 * i40e_update_filter_state - Update filter state based on return data
2305 * @count: Number of filters added
2306 * @add_list: return data from fw
2307 * @add_head: pointer to first filter in current batch
2309 * MAC filter entries from list were slated to be added to device. Returns
2310 * number of successful filters. Note that 0 does NOT mean success!
2312 static int
2313 i40e_update_filter_state(int count,
2314 struct i40e_aqc_add_macvlan_element_data *add_list,
2315 struct i40e_new_mac_filter *add_head)
2320 for (i = 0; i < count; i++) {
2321 /* Always check status of each filter. We don't need to check
2322 * the firmware return status because we pre-set the filter
2323 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
2324 * request to the adminq. Thus, if it no longer matches then
2325 * we know the filter is active.
2327 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2328 add_head->state = I40E_FILTER_FAILED;
2329 } else {
2330 add_head->state = I40E_FILTER_ACTIVE;
2331 retval++;
2334 add_head = i40e_next_filter(add_head);
2335 if (!add_head)
2336 break;
2339 return retval;
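/* Illustrative sketch of the sentinel trick described above: the driver
 * seeds each request element with a value firmware never returns for a
 * successful add, then re-reads it after the AQ command completes:
 *
 *	add_list[i].match_method = I40E_AQC_MM_ERR_NO_RES;  // before send
 *	...
 *	if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES)
 *		;  // untouched by firmware => this filter failed
 */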
2343 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2344 * @vsi: ptr to the VSI
2345 * @vsi_name: name to display in messages
2346 * @list: the list of filters to send to firmware
2347 * @num_del: the number of filters to delete
2348 * @retval: Set to -EIO on failure to delete
2350 * Send a request to firmware via AdminQ to delete a set of filters. Uses
2351 * *retval instead of a return value so that success does not force ret_val to
2352 * be set to 0. This ensures that a sequence of calls to this function
2353 * preserve the previous value of *retval on successful delete.
2355 static
2356 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2357 struct i40e_aqc_remove_macvlan_element_data *list,
2358 int num_del, int *retval)
2360 struct i40e_hw *hw = &vsi->back->hw;
2361 enum i40e_admin_queue_err aq_status;
2364 aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
2367 /* Explicitly ignore and do not report when firmware returns ENOENT */
2368 if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
2370 dev_info(&vsi->back->pdev->dev,
2371 "ignoring delete macvlan error on %s, err %pe, aq_err %s\n",
2372 vsi_name, ERR_PTR(aq_ret),
2373 i40e_aq_str(hw, aq_status));
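/* Usage sketch (illustrative): because this helper only ever writes
 * -EIO to *retval and leaves it alone on success, a caller can batch
 * several deletes and keep the first failure:
 *
 *	int retval = 0;
 *
 *	i40e_aqc_del_filters(vsi, name, list_a, n_a, &retval);
 *	i40e_aqc_del_filters(vsi, name, list_b, n_b, &retval);
 *	// retval is still -EIO here if either batch failed
 */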
2378 * i40e_aqc_add_filters - Request firmware to add a set of filters
2379 * @vsi: ptr to the VSI
2380 * @vsi_name: name to display in messages
2381 * @list: the list of filters to send to firmware
2382 * @add_head: Position in the add hlist
2383 * @num_add: the number of filters to add
2385 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2386 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2387 * space for more filters.
2389 static
2390 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2391 struct i40e_aqc_add_macvlan_element_data *list,
2392 struct i40e_new_mac_filter *add_head,
2395 struct i40e_hw *hw = &vsi->back->hw;
2396 enum i40e_admin_queue_err aq_status;
2399 i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
2400 fcnt = i40e_update_filter_state(num_add, list, add_head);
2402 if (fcnt != num_add) {
2403 if (vsi->type == I40E_VSI_MAIN) {
2404 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2405 dev_warn(&vsi->back->pdev->dev,
2406 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2407 i40e_aq_str(hw, aq_status), vsi_name);
2408 } else if (vsi->type == I40E_VSI_SRIOV ||
2409 vsi->type == I40E_VSI_VMDQ1 ||
2410 vsi->type == I40E_VSI_VMDQ2) {
2411 dev_warn(&vsi->back->pdev->dev,
2412 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2413 i40e_aq_str(hw, aq_status), vsi_name,
2414 vsi_name);
2415 } else {
2416 dev_warn(&vsi->back->pdev->dev,
2417 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2418 i40e_aq_str(hw, aq_status), vsi_name,
2419 vsi->type);
2425 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2426 * @vsi: pointer to the VSI
2427 * @vsi_name: the VSI name
2430 * This function sets or clears the promiscuous broadcast flags for VLAN
2431 * filters in order to properly receive broadcast frames. Assumes that only
2432 * broadcast filters are passed.
2434 * Returns status indicating success or failure;
2436 static int
2437 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2438 struct i40e_mac_filter *f)
2440 bool enable = f->state == I40E_FILTER_NEW;
2441 struct i40e_hw *hw = &vsi->back->hw;
2444 if (f->vlan == I40E_VLAN_ANY) {
2445 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2446 vsi->seid,
2447 enable,
2448 NULL);
2449 } else {
2450 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2451 vsi->seid,
2452 enable,
2453 f->vlan,
2454 NULL);
2457 if (aq_ret) {
2458 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2459 dev_warn(&vsi->back->pdev->dev,
2460 "Error %s, forcing overflow promiscuous on %s\n",
2461 i40e_aq_str(hw, hw->aq.asq_last_status),
2469 * i40e_set_promiscuous - set promiscuous mode
2470 * @pf: board private structure
2471 * @promisc: promisc on or off
2473 * There are different ways of setting promiscuous mode on a PF depending on
2474 * what state/environment we're in. This identifies and sets it appropriately.
2475 * Returns 0 on success.
2477 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2479 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2480 struct i40e_hw *hw = &pf->hw;
2483 if (vsi->type == I40E_VSI_MAIN &&
2484 pf->lan_veb != I40E_NO_VEB &&
2485 !test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
2486 /* set defport ON for Main VSI instead of true promisc
2487 * this way we will get all unicast/multicast and VLAN
2488 * promisc behavior but will not get VF or VMDq traffic
2489 * replicated on the Main VSI.
2490 */
2491 if (promisc)
2492 aq_ret = i40e_aq_set_default_vsi(hw,
2493 vsi->seid,
2494 NULL);
2495 else
2496 aq_ret = i40e_aq_clear_default_vsi(hw,
2497 vsi->seid,
2498 NULL);
2499 if (aq_ret) {
2500 dev_info(&pf->pdev->dev,
2501 "Set default VSI failed, err %pe, aq_err %s\n",
2502 ERR_PTR(aq_ret),
2503 i40e_aq_str(hw, hw->aq.asq_last_status));
2506 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2507 hw,
2508 vsi->seid,
2509 promisc, NULL,
2510 true);
2511 if (aq_ret) {
2512 dev_info(&pf->pdev->dev,
2513 "set unicast promisc failed, err %pe, aq_err %s\n",
2514 ERR_PTR(aq_ret),
2515 i40e_aq_str(hw, hw->aq.asq_last_status));
2517 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2518 hw,
2519 vsi->seid,
2520 promisc, NULL);
2521 if (aq_ret) {
2522 dev_info(&pf->pdev->dev,
2523 "set multicast promisc failed, err %pe, aq_err %s\n",
2524 ERR_PTR(aq_ret),
2525 i40e_aq_str(hw, hw->aq.asq_last_status));
2530 pf->cur_promisc = promisc;
2536 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2537 * @vsi: ptr to the VSI
2539 * Push any outstanding VSI filter changes through the AdminQ.
2541 * Returns 0 or error value
2543 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2545 struct hlist_head tmp_add_list, tmp_del_list;
2546 struct i40e_mac_filter *f;
2547 struct i40e_new_mac_filter *new, *add_head = NULL;
2548 struct i40e_hw *hw = &vsi->back->hw;
2549 bool old_overflow, new_overflow;
2550 unsigned int failed_filters = 0;
2551 unsigned int vlan_filters = 0;
2552 char vsi_name[16] = "PF";
2553 int filter_list_len = 0;
2554 u32 changed_flags = 0;
2555 struct hlist_node *h;
2565 /* empty, array-typed pointers; allocated with kzalloc later */
2566 struct i40e_aqc_add_macvlan_element_data *add_list;
2567 struct i40e_aqc_remove_macvlan_element_data *del_list;
2569 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2570 usleep_range(1000, 2000);
2573 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2576 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2577 vsi->current_netdev_flags = vsi->netdev->flags;
2580 INIT_HLIST_HEAD(&tmp_add_list);
2581 INIT_HLIST_HEAD(&tmp_del_list);
2583 if (vsi->type == I40E_VSI_SRIOV)
2584 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2585 else if (vsi->type != I40E_VSI_MAIN)
2586 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2588 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2589 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2591 spin_lock_bh(&vsi->mac_filter_hash_lock);
2592 /* Create a list of filters to delete. */
2593 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2594 if (f->state == I40E_FILTER_REMOVE) {
2595 /* Move the element into temporary del_list */
2596 hash_del(&f->hlist);
2597 hlist_add_head(&f->hlist, &tmp_del_list);
2599 /* Avoid counting removed filters */
2600 continue;
2602 if (f->state == I40E_FILTER_NEW) {
2603 /* Create a temporary i40e_new_mac_filter */
2604 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2605 if (!new)
2606 goto err_no_memory_locked;
2608 /* Store pointer to the real filter */
2609 new->f = f;
2610 new->state = f->state;
2612 /* Add it to the hash list */
2613 hlist_add_head(&new->hlist, &tmp_add_list);
2616 /* Count the number of active (current and new) VLAN
2617 * filters we have now. Does not count filters which
2618 * are marked for deletion.
2619 */
2620 if (f->vlan > 0)
2621 vlan_filters++;
2624 if (vsi->type != I40E_VSI_SRIOV)
2625 retval = i40e_correct_mac_vlan_filters
2626 (vsi, &tmp_add_list, &tmp_del_list,
2629 retval = i40e_correct_vf_mac_vlan_filters
2630 (vsi, &tmp_add_list, &tmp_del_list,
2631 vlan_filters, pf->vf[vsi->vf_id].trusted);
2633 hlist_for_each_entry(new, &tmp_add_list, hlist)
2634 netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
2636 if (retval)
2637 goto err_no_memory_locked;
2639 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2642 /* Now process 'del_list' outside the lock */
2643 if (!hlist_empty(&tmp_del_list)) {
2644 filter_list_len = hw->aq.asq_buf_size /
2645 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2646 list_size = filter_list_len *
2647 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2648 del_list = kzalloc(list_size, GFP_ATOMIC);
2649 if (!del_list)
2650 goto err_no_memory;
2652 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2655 /* handle broadcast filters by updating the broadcast
2656 * promiscuous flag and release filter list.
2658 if (is_broadcast_ether_addr(f->macaddr)) {
2659 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2661 hlist_del(&f->hlist);
2662 kfree(f);
2663 continue;
2666 /* add to delete list */
2667 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2668 if (f->vlan == I40E_VLAN_ANY) {
2669 del_list[num_del].vlan_tag = 0;
2670 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2671 } else {
2672 del_list[num_del].vlan_tag =
2673 cpu_to_le16((u16)(f->vlan));
2676 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2677 del_list[num_del].flags = cmd_flags;
2678 num_del++;
2680 /* flush a full buffer */
2681 if (num_del == filter_list_len) {
2682 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2683 num_del, &retval);
2684 memset(del_list, 0, list_size);
2685 num_del = 0;
2687 /* Release memory for MAC filter entries which were
2688 * synced up with HW.
2690 hlist_del(&f->hlist);
2691 kfree(f);
2694 if (num_del)
2695 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2696 num_del, &retval);
2703 if (!hlist_empty(&tmp_add_list)) {
2704 /* Do all the adds now. */
2705 filter_list_len = hw->aq.asq_buf_size /
2706 sizeof(struct i40e_aqc_add_macvlan_element_data);
2707 list_size = filter_list_len *
2708 sizeof(struct i40e_aqc_add_macvlan_element_data);
2709 add_list = kzalloc(list_size, GFP_ATOMIC);
2710 if (!add_list)
2711 goto err_no_memory;
2714 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2715 /* handle broadcast filters by updating the broadcast
2716 * promiscuous flag instead of adding a MAC filter.
2718 if (is_broadcast_ether_addr(new->f->macaddr)) {
2719 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2720 new->f))
2721 new->state = I40E_FILTER_FAILED;
2722 else
2723 new->state = I40E_FILTER_ACTIVE;
2724 continue;
2727 /* add to add array */
2728 if (num_add == 0)
2729 add_head = new;
2730 cmd_flags = 0;
2731 ether_addr_copy(add_list[num_add].mac_addr,
2732 new->f->macaddr);
2733 if (new->f->vlan == I40E_VLAN_ANY) {
2734 add_list[num_add].vlan_tag = 0;
2735 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2736 } else {
2737 add_list[num_add].vlan_tag =
2738 cpu_to_le16((u16)(new->f->vlan));
2740 add_list[num_add].queue_number = 0;
2741 /* set invalid match method for later detection */
2742 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2743 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2744 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2745 num_add++;
2747 /* flush a full buffer */
2748 if (num_add == filter_list_len) {
2749 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2750 add_head, num_add);
2751 memset(add_list, 0, list_size);
2752 num_add = 0;
2755 if (num_add)
2756 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2757 num_add);
2759 /* Now move all of the filters from the temp add list back to
2760 * the VSI's list.
2761 */
2762 spin_lock_bh(&vsi->mac_filter_hash_lock);
2763 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2764 /* Only update the state if we're still NEW */
2765 if (new->f->state == I40E_FILTER_NEW)
2766 new->f->state = new->state;
2767 hlist_del(&new->hlist);
2768 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2769 kfree(new);
2771 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2776 /* Determine the number of active and failed filters. */
2777 spin_lock_bh(&vsi->mac_filter_hash_lock);
2778 vsi->active_filters = 0;
2779 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2780 if (f->state == I40E_FILTER_ACTIVE)
2781 vsi->active_filters++;
2782 else if (f->state == I40E_FILTER_FAILED)
2783 failed_filters++;
2785 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2787 /* Check if we are able to exit overflow promiscuous mode. We can
2788 * safely exit if we didn't just enter, we no longer have any failed
2789 * filters, and we have reduced filters below the threshold value.
2790 */
2791 if (old_overflow && !failed_filters &&
2792 vsi->active_filters < vsi->promisc_threshold) {
2793 dev_info(&pf->pdev->dev,
2794 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2796 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2797 vsi->promisc_threshold = 0;
2800 /* if the VF is not trusted do not do promisc */
2801 if (vsi->type == I40E_VSI_SRIOV && pf->vf &&
2802 !pf->vf[vsi->vf_id].trusted) {
2803 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2804 goto out;
2807 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2809 /* If we are entering overflow promiscuous, we need to calculate a new
2810 * threshold for when we are safe to exit
2811 */
2812 if (!old_overflow && new_overflow)
2813 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
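/* Worked example (illustrative): if the VSI overflowed with 48 active
 * filters, the exit threshold becomes (48 * 3) / 4 = 36, so overflow
 * promiscuous mode is only left once the list shrinks below 36 working
 * filters (and no failed filters remain).
 */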
2815 /* check for changes in promiscuous modes */
2816 if (changed_flags & IFF_ALLMULTI) {
2817 bool cur_multipromisc;
2819 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2820 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2821 vsi->seid,
2822 cur_multipromisc,
2823 NULL);
2824 if (aq_ret) {
2825 retval = i40e_aq_rc_to_posix(aq_ret,
2826 hw->aq.asq_last_status);
2827 dev_info(&pf->pdev->dev,
2828 "set multi promisc failed on %s, err %pe aq_err %s\n",
2831 i40e_aq_str(hw, hw->aq.asq_last_status));
2833 dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
2834 cur_multipromisc ? "entering" : "leaving");
2838 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2841 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2842 new_overflow);
2843 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2845 retval = i40e_aq_rc_to_posix(aq_ret,
2846 hw->aq.asq_last_status);
2847 dev_info(&pf->pdev->dev,
2848 "Setting promiscuous %s failed on %s, err %pe aq_err %s\n",
2849 cur_promisc ? "on" : "off",
2852 i40e_aq_str(hw, hw->aq.asq_last_status));
2856 /* if something went wrong then set the changed flag so we try again */
2857 if (retval)
2858 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2860 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2861 return retval;
2863 err_no_memory:
2864 /* Restore elements on the temporary add and delete lists */
2865 spin_lock_bh(&vsi->mac_filter_hash_lock);
2866 err_no_memory_locked:
2867 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2868 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2869 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2871 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2872 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2873 return -ENOMEM;
2877 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2878 * @pf: board private structure
2880 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2886 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2887 return;
2888 if (test_bit(__I40E_VF_DISABLE, pf->state)) {
2889 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2890 return;
2893 for (v = 0; v < pf->num_alloc_vsi; v++) {
2894 if (pf->vsi[v] &&
2895 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
2896 !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
2897 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2899 if (ret) {
2900 /* come back and try again later */
2901 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2902 pf->state);
2903 break;
2910 * i40e_calculate_vsi_rx_buf_len - Calculates buffer length
2912 * @vsi: VSI to calculate rx_buf_len from
2914 static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi)
2916 if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags))
2917 return SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048);
2919 return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
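/* Illustrative summary: on a typical 4K-page system with the default
 * (non-legacy) Rx path this returns I40E_RXBUFFER_3072, while legacy-rx
 * or netdev-less VSIs get SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048); only
 * systems with pages of 8K or larger use plain I40E_RXBUFFER_2048 here.
 */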
2923 * i40e_max_vsi_frame_size - returns the maximum allowed frame size for VSI
2925 * @xdp_prog: XDP program
2927 static int i40e_max_vsi_frame_size(struct i40e_vsi *vsi,
2928 struct bpf_prog *xdp_prog)
2930 u16 rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
2933 if (xdp_prog && !xdp_prog->aux->xdp_has_frags)
2934 chain_len = 1;
2935 else
2936 chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
2938 return min_t(u16, rx_buf_len * chain_len, I40E_MAX_RXBUFFER);
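/* Worked example (illustrative): with a 2048-byte buffer and hardware
 * chaining of up to I40E_MAX_CHAINED_RX_BUFFERS buffers per frame,
 * 2048 * 5 = 10240 is clamped down to I40E_MAX_RXBUFFER, while a
 * frags-less XDP program (chain_len = 1) limits the frame to a single
 * buffer.
 */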
2942 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2943 * @netdev: network interface device structure
2944 * @new_mtu: new value for maximum frame size
2946 * Returns 0 on success, negative on failure
2948 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2950 struct i40e_netdev_priv *np = netdev_priv(netdev);
2951 struct i40e_vsi *vsi = np->vsi;
2952 struct i40e_pf *pf = vsi->back;
2955 frame_size = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog);
2956 if (new_mtu > frame_size - I40E_PACKET_HDR_PAD) {
2957 netdev_err(netdev, "Error changing mtu to %d, Max is %d\n",
2958 new_mtu, frame_size - I40E_PACKET_HDR_PAD);
2959 return -EINVAL;
2962 netdev_dbg(netdev, "changing MTU from %d to %d\n",
2963 netdev->mtu, new_mtu);
2964 netdev->mtu = new_mtu;
2965 if (netif_running(netdev))
2966 i40e_vsi_reinit_locked(vsi);
2967 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2968 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2973 * i40e_ioctl - Access the hwtstamp interface
2974 * @netdev: network interface device structure
2975 * @ifr: interface request data
2976 * @cmd: ioctl command
2978 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2980 struct i40e_netdev_priv *np = netdev_priv(netdev);
2981 struct i40e_pf *pf = np->vsi->back;
2983 switch (cmd) {
2984 case SIOCGHWTSTAMP:
2985 return i40e_ptp_get_ts_config(pf, ifr);
2986 case SIOCSHWTSTAMP:
2987 return i40e_ptp_set_ts_config(pf, ifr);
2988 default:
2989 return -EOPNOTSUPP;
2994 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2995 * @vsi: the vsi being adjusted
2997 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2999 struct i40e_vsi_context ctxt;
3002 /* Don't modify stripping options if a port VLAN is active */
3003 if (vsi->info.pvid)
3004 return;
3006 if ((vsi->info.valid_sections &
3007 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
3008 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
3009 return; /* already enabled */
3011 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3012 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
3013 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
3015 ctxt.seid = vsi->seid;
3016 ctxt.info = vsi->info;
3017 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3019 dev_info(&vsi->back->pdev->dev,
3020 "update vlan stripping failed, err %pe aq_err %s\n",
3022 i40e_aq_str(&vsi->back->hw,
3023 vsi->back->hw.aq.asq_last_status));
3028 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
3029 * @vsi: the vsi being adjusted
3031 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
3033 struct i40e_vsi_context ctxt;
3036 /* Don't modify stripping options if a port VLAN is active */
3037 if (vsi->info.pvid)
3038 return;
3040 if ((vsi->info.valid_sections &
3041 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
3042 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
3043 I40E_AQ_VSI_PVLAN_EMOD_MASK))
3044 return; /* already disabled */
3046 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3047 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
3048 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
3050 ctxt.seid = vsi->seid;
3051 ctxt.info = vsi->info;
3052 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3054 dev_info(&vsi->back->pdev->dev,
3055 "update vlan stripping failed, err %pe aq_err %s\n",
3057 i40e_aq_str(&vsi->back->hw,
3058 vsi->back->hw.aq.asq_last_status));
3063 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
3064 * @vsi: the vsi being configured
3065 * @vid: vlan id to be added (0 = untagged only , -1 = any)
3067 * This is a helper function for adding a new MAC/VLAN filter with the
3068 * specified VLAN for each existing MAC address already in the hash table.
3069 * This function does *not* perform any accounting to update filters based on
3072 * NOTE: this function expects to be called while under the
3073 * mac_filter_hash_lock
3075 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
3077 struct i40e_mac_filter *f, *add_f;
3078 struct hlist_node *h;
3081 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3082 /* If we're asked to add a filter that has been marked for
3083 * removal, it is safe to simply restore it to active state.
3084 * __i40e_del_filter will have simply deleted any filters which
3085 * were previously marked NEW or FAILED, so if it is currently
3086 * marked REMOVE it must have previously been ACTIVE. Since we
3087 * haven't yet run the sync filters task, just restore this
3088 * filter to the ACTIVE state so that the sync task leaves it
3091 if (f->state == I40E_FILTER_REMOVE && f->vlan == vid) {
3092 f->state = I40E_FILTER_ACTIVE;
3093 continue;
3094 } else if (f->state == I40E_FILTER_REMOVE) {
3095 continue;
3097 add_f = i40e_add_filter(vsi, f->macaddr, vid);
3098 if (!add_f) {
3099 dev_info(&vsi->back->pdev->dev,
3100 "Could not add vlan filter %d for %pM\n",
3101 vid, f->macaddr);
3102 return -ENOMEM;
3110 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
3111 * @vsi: the VSI being configured
3112 * @vid: VLAN id to be added
3114 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
3121 /* The network stack will attempt to add VID=0, with the intention to
3122 * receive priority tagged packets with a VLAN of 0. Our HW receives
3123 * these packets by default when configured to receive untagged
3124 * packets, so we don't need to add a filter for this case.
3125 * Additionally, HW interprets adding a VID=0 filter as meaning to
3126 * receive *only* tagged traffic and stops receiving untagged traffic.
3127 * Thus, we do not want to actually add a filter for VID=0
3128 */
3129 if (!vid)
3130 return 0;
3132 /* Locked once because all functions invoked below iterates list*/
3133 spin_lock_bh(&vsi->mac_filter_hash_lock);
3134 err = i40e_add_vlan_all_mac(vsi, vid);
3135 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3136 if (err)
3137 return err;
3139 /* schedule our worker thread which will take care of
3140 * applying the new filter changes
3141 */
3142 i40e_service_event_schedule(vsi->back);
3143 return 0;
3147 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
3148 * @vsi: the vsi being configured
3149 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
3151 * This function should be used to remove all VLAN filters which match the
3152 * given VID. It does not schedule the service event and does not take the
3153 * mac_filter_hash_lock so it may be combined with other operations under
3154 * a single invocation of the mac_filter_hash_lock.
3156 * NOTE: this function expects to be called while under the
3157 * mac_filter_hash_lock
3159 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
3161 struct i40e_mac_filter *f;
3162 struct hlist_node *h;
3165 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3166 if (f->vlan == vid)
3167 __i40e_del_filter(vsi, f);
3172 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
3173 * @vsi: the VSI being configured
3174 * @vid: VLAN id to be removed
3176 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
3178 if (!vid || vsi->info.pvid)
3179 return;
3181 spin_lock_bh(&vsi->mac_filter_hash_lock);
3182 i40e_rm_vlan_all_mac(vsi, vid);
3183 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3185 /* schedule our worker thread which will take care of
3186 * applying the new filter changes
3187 */
3188 i40e_service_event_schedule(vsi->back);
3192 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
3193 * @netdev: network interface to be adjusted
3194 * @proto: unused protocol value
3195 * @vid: vlan id to be added
3197 * net_device_ops implementation for adding vlan ids
3199 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
3200 __always_unused __be16 proto, u16 vid)
3202 struct i40e_netdev_priv *np = netdev_priv(netdev);
3203 struct i40e_vsi *vsi = np->vsi;
3206 if (vid >= VLAN_N_VID)
3207 return -EINVAL;
3209 ret = i40e_vsi_add_vlan(vsi, vid);
3210 if (!ret)
3211 set_bit(vid, vsi->active_vlans);
3213 return ret;
3217 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
3218 * @netdev: network interface to be adjusted
3219 * @proto: unused protocol value
3220 * @vid: vlan id to be added
3222 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
3223 __always_unused __be16 proto, u16 vid)
3225 struct i40e_netdev_priv *np = netdev_priv(netdev);
3226 struct i40e_vsi *vsi = np->vsi;
3228 if (vid >= VLAN_N_VID)
3229 return;
3230 set_bit(vid, vsi->active_vlans);
3234 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
3235 * @netdev: network interface to be adjusted
3236 * @proto: unused protocol value
3237 * @vid: vlan id to be removed
3239 * net_device_ops implementation for removing vlan ids
3241 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
3242 __always_unused __be16 proto, u16 vid)
3244 struct i40e_netdev_priv *np = netdev_priv(netdev);
3245 struct i40e_vsi *vsi = np->vsi;
3247 /* return code is ignored as there is nothing a user
3248 * can do about failure to remove and a log message was
3249 * already printed from the other function
3250 */
3251 i40e_vsi_kill_vlan(vsi, vid);
3253 clear_bit(vid, vsi->active_vlans);
3259 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
3260 * @vsi: the vsi being brought back up
3262 static void i40e_restore_vlan(struct i40e_vsi *vsi)
3269 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3270 i40e_vlan_stripping_enable(vsi);
3271 else
3272 i40e_vlan_stripping_disable(vsi);
3274 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
3275 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
3280 * i40e_vsi_add_pvid - Add pvid for the VSI
3281 * @vsi: the vsi being adjusted
3282 * @vid: the vlan id to set as a PVID
3284 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
3286 struct i40e_vsi_context ctxt;
3289 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3290 vsi->info.pvid = cpu_to_le16(vid);
3291 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
3292 I40E_AQ_VSI_PVLAN_INSERT_PVID |
3293 I40E_AQ_VSI_PVLAN_EMOD_STR;
3295 ctxt.seid = vsi->seid;
3296 ctxt.info = vsi->info;
3297 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3299 dev_info(&vsi->back->pdev->dev,
3300 "add pvid failed, err %pe aq_err %s\n",
3302 i40e_aq_str(&vsi->back->hw,
3303 vsi->back->hw.aq.asq_last_status));
3311 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
3312 * @vsi: the vsi being adjusted
3314 * Just use the vlan_rx_register() service to put it back to normal
3316 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
3320 i40e_vlan_stripping_disable(vsi);
3324 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
3325 * @vsi: ptr to the VSI
3327 * If this function returns with an error, then it's possible one or
3328 * more of the rings is populated (while the rest are not). It is the
3329 * callers duty to clean those orphaned rings.
3331 * Return 0 on success, negative on failure
3333 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3337 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3338 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3340 if (!i40e_enabled_xdp_vsi(vsi))
3343 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3344 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3350 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3351 * @vsi: ptr to the VSI
3353 * Free VSI's transmit software resources
3355 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3359 if (vsi->tx_rings) {
3360 for (i = 0; i < vsi->num_queue_pairs; i++)
3361 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3362 i40e_free_tx_resources(vsi->tx_rings[i]);
3365 if (vsi->xdp_rings) {
3366 for (i = 0; i < vsi->num_queue_pairs; i++)
3367 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3368 i40e_free_tx_resources(vsi->xdp_rings[i]);
3373 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3374 * @vsi: ptr to the VSI
3376 * If this function returns with an error, then it's possible one or
3377 * more of the rings is populated (while the rest are not). It is the
3378 * callers duty to clean those orphaned rings.
3380 * Return 0 on success, negative on failure
3382 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3386 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3387 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3392 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3393 * @vsi: ptr to the VSI
3395 * Free all receive software resources
3397 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3404 for (i = 0; i < vsi->num_queue_pairs; i++)
3405 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3406 i40e_free_rx_resources(vsi->rx_rings[i]);
3410 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3411 * @ring: The Tx ring to configure
3413 * This enables/disables XPS for a given Tx descriptor ring
3414 * based on the TCs enabled for the VSI that ring belongs to.
3416 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3420 if (!ring->q_vector || !ring->netdev || ring->ch)
3423 /* We only initialize XPS once, so as not to overwrite user settings */
3424 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3427 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3428 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3433 * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled
3434 * @ring: The Tx or Rx ring
3436 * Returns the AF_XDP buffer pool or NULL.
3438 static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
3440 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3441 int qid = ring->queue_index;
3443 if (ring_is_xdp(ring))
3444 qid -= ring->vsi->alloc_queue_pairs;
3446 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3449 return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
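/* Worked example (illustrative): on a VSI with alloc_queue_pairs = 8,
 * XDP Tx ring with queue_index 10 serves queue pair 10 - 8 = 2, so both
 * rx_rings[2] and xdp_rings[2] look up the AF_XDP pool registered for
 * qid 2.
 */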
3453 * i40e_configure_tx_ring - Configure a transmit ring context and rest
3454 * @ring: The Tx ring to configure
3456 * Configure the Tx descriptor ring in the HMC context.
3458 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3460 struct i40e_vsi *vsi = ring->vsi;
3461 u16 pf_q = vsi->base_queue + ring->queue_index;
3462 struct i40e_hw *hw = &vsi->back->hw;
3463 struct i40e_hmc_obj_txq tx_ctx;
3467 if (ring_is_xdp(ring))
3468 ring->xsk_pool = i40e_xsk_pool(ring);
3470 /* some ATR related tx ring init */
3471 if (test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags)) {
3472 ring->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
3473 ring->atr_count = 0;
3475 ring->atr_sample_rate = 0;
3479 i40e_config_xps_tx_ring(ring);
3481 /* clear the context structure first */
3482 memset(&tx_ctx, 0, sizeof(tx_ctx));
3484 tx_ctx.new_context = 1;
3485 tx_ctx.base = (ring->dma / 128);
3486 tx_ctx.qlen = ring->count;
3487 if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags) ||
3488 test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags))
3489 tx_ctx.fd_ena = 1;
3490 if (test_bit(I40E_FLAG_PTP_ENA, vsi->back->flags))
3491 tx_ctx.timesync_ena = 1;
3492 /* FDIR VSI tx ring can still use RS bit and writebacks */
3493 if (vsi->type != I40E_VSI_FDIR)
3494 tx_ctx.head_wb_ena = 1;
3495 tx_ctx.head_wb_addr = ring->dma +
3496 (ring->count * sizeof(struct i40e_tx_desc));
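/* Worked example (illustrative): the head pointer is written back just
 * past the descriptor ring, so with ring->count = 512 descriptors of
 * sizeof(struct i40e_tx_desc) = 16 bytes each, the writeback lands at
 * ring->dma + 512 * 16 = ring->dma + 8192.
 */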
3498 /* As part of VSI creation/update, FW allocates certain
3499 * Tx arbitration queue sets for each TC enabled for
3500 * the VSI. The FW returns the handles to these queue
3501 * sets as part of the response buffer to Add VSI,
3502 * Update VSI, etc. AQ commands. It is expected that
3503 * these queue set handles be associated with the Tx
3504 * queues by the driver as part of the TX queue context
3505 * initialization. This has to be done regardless of
3506 * DCB as by default everything is mapped to TC0.
3509 if (ring->ch)
3510 tx_ctx.rdylist =
3511 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3513 else
3514 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3516 tx_ctx.rdylist_act = 0;
3518 /* clear the context in the HMC */
3519 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3520 if (err) {
3521 dev_info(&vsi->back->pdev->dev,
3522 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3523 ring->queue_index, pf_q, err);
3524 return -ENOMEM;
3527 /* set the context in the HMC */
3528 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3529 if (err) {
3530 dev_info(&vsi->back->pdev->dev,
3531 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3532 ring->queue_index, pf_q, err);
3533 return -ENOMEM;
3536 /* Now associate this queue with this PCI function */
3537 if (ring->ch) {
3538 if (ring->ch->type == I40E_VSI_VMDQ2)
3539 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3540 else
3541 return -EINVAL;
3543 qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
3544 ring->ch->vsi_number);
3545 } else {
3546 if (vsi->type == I40E_VSI_VMDQ2) {
3547 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3548 qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
3549 vsi->id);
3550 } else {
3551 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3555 qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id);
3556 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3559 /* cache tail off for easier writes later */
3560 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3566 * i40e_rx_offset - Return expected offset into page to access data
3567 * @rx_ring: Ring we are requesting offset of
3569 * Returns the offset value for ring into the data buffer.
3571 static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
3573 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
3577 * i40e_configure_rx_ring - Configure a receive ring context
3578 * @ring: The Rx ring to configure
3580 * Configure the Rx descriptor ring in the HMC context.
3582 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3584 struct i40e_vsi *vsi = ring->vsi;
3585 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3586 u16 pf_q = vsi->base_queue + ring->queue_index;
3587 struct i40e_hw *hw = &vsi->back->hw;
3588 struct i40e_hmc_obj_rxq rx_ctx;
3593 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3595 /* clear the context structure first */
3596 memset(&rx_ctx, 0, sizeof(rx_ctx));
3598 if (ring->vsi->type == I40E_VSI_MAIN)
3599 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3601 ring->xsk_pool = i40e_xsk_pool(ring);
3602 if (ring->xsk_pool) {
3603 ring->rx_buf_len =
3604 xsk_pool_get_rx_frame_size(ring->xsk_pool);
3605 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3606 MEM_TYPE_XSK_BUFF_POOL,
3607 NULL);
3608 if (ret)
3609 return ret;
3610 dev_info(&vsi->back->pdev->dev,
3611 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
3612 ring->queue_index);
3614 } else {
3615 ring->rx_buf_len = vsi->rx_buf_len;
3616 if (ring->vsi->type == I40E_VSI_MAIN) {
3617 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3618 MEM_TYPE_PAGE_SHARED,
3619 NULL);
3620 if (ret)
3621 return ret;
3625 xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
3627 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3628 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
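/* Worked example (illustrative): dbuff is expressed in units of
 * BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT) = 128 bytes, so a 3072-byte Rx
 * buffer programs dbuff = DIV_ROUND_UP(3072, 128) = 24.
 */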
3630 rx_ctx.base = (ring->dma / 128);
3631 rx_ctx.qlen = ring->count;
3633 /* use 32 byte descriptors */
3634 rx_ctx.dsize = 1;
3636 /* descriptor type is always zero
3637 * rx_ctx.dtype = 0;
3638 */
3639 rx_ctx.hsplit_0 = 0;
3641 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3642 if (hw->revision_id == 0)
3643 rx_ctx.lrxqthresh = 0;
3645 rx_ctx.lrxqthresh = 1;
3646 rx_ctx.crcstrip = 1;
3648 /* this controls whether VLAN is stripped from inner headers */
3649 rx_ctx.showiv = 0;
3650 /* set the prefena field to 1 because the manual says to */
3651 rx_ctx.prefena = 1;
3653 /* clear the context in the HMC */
3654 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3655 if (err) {
3656 dev_info(&vsi->back->pdev->dev,
3657 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3658 ring->queue_index, pf_q, err);
3659 return -ENOMEM;
3662 /* set the context in the HMC */
3663 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3664 if (err) {
3665 dev_info(&vsi->back->pdev->dev,
3666 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3667 ring->queue_index, pf_q, err);
3668 return -ENOMEM;
3671 /* configure Rx buffer alignment */
3672 if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags)) {
3673 if (I40E_2K_TOO_SMALL_WITH_PADDING) {
3674 dev_info(&vsi->back->pdev->dev,
3675 "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
3678 clear_ring_build_skb_enabled(ring);
3680 set_ring_build_skb_enabled(ring);
3683 ring->rx_offset = i40e_rx_offset(ring);
3685 /* cache tail for quicker writes, and clear the reg before use */
3686 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3687 writel(0, ring->tail);
3689 if (ring->xsk_pool) {
3690 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
3691 ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
3692 } else {
3693 ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3695 if (!ok) {
3696 /* Log this in case the user has forgotten to give the kernel
3697 * any buffers, even later in the application.
3699 dev_info(&vsi->back->pdev->dev,
3700 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3701 ring->xsk_pool ? "AF_XDP ZC enabled " : "",
3702 ring->queue_index, pf_q);
3709 * i40e_vsi_configure_tx - Configure the VSI for Tx
3710 * @vsi: VSI structure describing this set of rings and resources
3712 * Configure the Tx VSI for operation.
3714 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3719 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3720 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3722 if (err || !i40e_enabled_xdp_vsi(vsi))
3725 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3726 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3732 * i40e_vsi_configure_rx - Configure the VSI for Rx
3733 * @vsi: the VSI being configured
3735 * Configure the Rx VSI for operation.
3737 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3742 vsi->max_frame = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog);
3743 vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
3745 #if (PAGE_SIZE < 8192)
3746 if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING &&
3747 vsi->netdev->mtu <= ETH_DATA_LEN) {
3748 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3749 vsi->max_frame = vsi->rx_buf_len;
3753 /* set up individual rings */
3754 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3755 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3761 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3762 * @vsi: ptr to the VSI
3764 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3766 struct i40e_ring *tx_ring, *rx_ring;
3767 u16 qoffset, qcount;
3770 if (!test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) {
3771 /* Reset the TC information */
3772 for (i = 0; i < vsi->num_queue_pairs; i++) {
3773 rx_ring = vsi->rx_rings[i];
3774 tx_ring = vsi->tx_rings[i];
3775 rx_ring->dcb_tc = 0;
3776 tx_ring->dcb_tc = 0;
3781 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3782 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3783 continue;
3785 qoffset = vsi->tc_config.tc_info[n].qoffset;
3786 qcount = vsi->tc_config.tc_info[n].qcount;
3787 for (i = qoffset; i < (qoffset + qcount); i++) {
3788 rx_ring = vsi->rx_rings[i];
3789 tx_ring = vsi->tx_rings[i];
3790 rx_ring->dcb_tc = n;
3791 tx_ring->dcb_tc = n;
3797 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3798 * @vsi: ptr to the VSI
3800 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3802 if (vsi->netdev)
3803 i40e_set_rx_mode(vsi->netdev);
3807 * i40e_reset_fdir_filter_cnt - Reset flow director filter counters
3808 * @pf: Pointer to the targeted PF
3810 * Set all flow director counters to 0.
3812 static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf)
3814 pf->fd_tcp4_filter_cnt = 0;
3815 pf->fd_udp4_filter_cnt = 0;
3816 pf->fd_sctp4_filter_cnt = 0;
3817 pf->fd_ip4_filter_cnt = 0;
3818 pf->fd_tcp6_filter_cnt = 0;
3819 pf->fd_udp6_filter_cnt = 0;
3820 pf->fd_sctp6_filter_cnt = 0;
3821 pf->fd_ip6_filter_cnt = 0;
3825 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3826 * @vsi: Pointer to the targeted VSI
3828 * This function replays the hlist on the hw where all the SB Flow Director
3829 * filters were saved.
3831 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3833 struct i40e_fdir_filter *filter;
3834 struct i40e_pf *pf = vsi->back;
3835 struct hlist_node *node;
3837 if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
3838 return;
3840 /* Reset FDir counters as we're replaying all existing filters */
3841 i40e_reset_fdir_filter_cnt(pf);
3843 hlist_for_each_entry_safe(filter, node,
3844 &pf->fdir_filter_list, fdir_node) {
3845 i40e_add_del_fdir(vsi, filter, true);
3850 * i40e_vsi_configure - Set up the VSI for action
3851 * @vsi: the VSI being configured
3853 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3857 i40e_set_vsi_rx_mode(vsi);
3858 i40e_restore_vlan(vsi);
3859 i40e_vsi_config_dcb_rings(vsi);
3860 err = i40e_vsi_configure_tx(vsi);
3861 if (!err)
3862 err = i40e_vsi_configure_rx(vsi);
3868 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3869 * @vsi: the VSI being configured
3871 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3873 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3874 struct i40e_pf *pf = vsi->back;
3875 struct i40e_hw *hw = &pf->hw;
3880 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3881 * and PFINT_LNKLSTn registers, e.g.:
3882 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3884 qp = vsi->base_queue;
3885 vector = vsi->base_vector;
3886 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3887 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3889 q_vector->rx.next_update = jiffies + 1;
3890 q_vector->rx.target_itr =
3891 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3892 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3893 q_vector->rx.target_itr >> 1);
3894 q_vector->rx.current_itr = q_vector->rx.target_itr;
3896 q_vector->tx.next_update = jiffies + 1;
3897 q_vector->tx.target_itr =
3898 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3899 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3900 q_vector->tx.target_itr >> 1);
3901 q_vector->tx.current_itr = q_vector->tx.target_itr;
3903 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3904 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3906 /* begin of linked list for RX queue assigned to this vector */
3907 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3908 for (q = 0; q < q_vector->num_ringpairs; q++) {
3909 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3912 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3913 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3914 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3915 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3916 (I40E_QUEUE_TYPE_TX <<
3917 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3919 wr32(hw, I40E_QINT_RQCTL(qp), val);
3922 /* TX queue with next queue set to TX */
3923 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3924 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3925 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3926 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3927 (I40E_QUEUE_TYPE_TX <<
3928 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3930 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3932 /* TX queue with next RX or end of linked list */
3933 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3934 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3935 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3936 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3937 (I40E_QUEUE_TYPE_RX <<
3938 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3940 /* Terminate the linked list */
3941 if (q == (q_vector->num_ringpairs - 1))
3942 val |= (I40E_QUEUE_END_OF_LIST <<
3943 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3945 wr32(hw, I40E_QINT_TQCTL(qp), val);
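/* Illustrative picture of the linked list built above for one vector
 * with two ring pairs and no XDP (qp numbering is VSI-relative):
 *
 *	PFINT_LNKLSTN[v-1] -> RX0 -> TX0 -> RX1 -> TX1 -> END_OF_LIST
 *
 * Each RQCTL/TQCTL entry names the next queue and its type; the final
 * TQCTL is tagged with I40E_QUEUE_END_OF_LIST to terminate the chain.
 */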
3954 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3955 * @pf: pointer to private device data structure
3957 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3959 struct i40e_hw *hw = &pf->hw;
3962 /* clear things first */
3963 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3964 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3966 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3967 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3968 I40E_PFINT_ICR0_ENA_GRST_MASK |
3969 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3970 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3971 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3972 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3973 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3975 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags))
3976 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3978 if (test_bit(I40E_FLAG_PTP_ENA, pf->flags))
3979 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3981 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3983 /* SW_ITR_IDX = 0, but don't change INTENA */
3984 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3985 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3987 /* OTHER_ITR_IDX = 0 */
3988 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3992 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3993 * @vsi: the VSI being configured
3995 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3997 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3998 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3999 struct i40e_pf *pf = vsi->back;
4000 struct i40e_hw *hw = &pf->hw;
4002 /* set the ITR configuration */
4003 q_vector->rx.next_update = jiffies + 1;
4004 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
4005 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
4006 q_vector->rx.current_itr = q_vector->rx.target_itr;
4007 q_vector->tx.next_update = jiffies + 1;
4008 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
4009 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
4010 q_vector->tx.current_itr = q_vector->tx.target_itr;
4012 i40e_enable_misc_int_causes(pf);
4014 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
4015 wr32(hw, I40E_PFINT_LNKLST0, 0);
4017 /* Associate the queue pair to the vector and enable the queue
4018 * interrupt RX queue in linked list with next queue set to TX
4020 wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX));
4022 if (i40e_enabled_xdp_vsi(vsi)) {
4023 /* TX queue in linked list with next queue set to TX */
4024 wr32(hw, I40E_QINT_TQCTL(nextqp),
4025 I40E_QINT_TQCTL_VAL(nextqp, 0, TX));
4028 /* last TX queue so the next RX queue doesn't matter */
4029 wr32(hw, I40E_QINT_TQCTL(0),
4030 I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX));
4035 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
4036 * @pf: board private structure
4038 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
4040 struct i40e_hw *hw = &pf->hw;
4042 wr32(hw, I40E_PFINT_DYN_CTL0,
4043 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4048 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
4049 * @pf: board private structure
4051 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
4053 struct i40e_hw *hw = &pf->hw;
4056 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4057 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4058 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4060 wr32(hw, I40E_PFINT_DYN_CTL0, val);
4065 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
4066 * @irq: interrupt number
4067 * @data: pointer to a q_vector
4069 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
4071 struct i40e_q_vector *q_vector = data;
4073 if (!q_vector->tx.ring && !q_vector->rx.ring)
4074 return IRQ_HANDLED;
4076 napi_schedule_irqoff(&q_vector->napi);
4078 return IRQ_HANDLED;
4082 * i40e_irq_affinity_notify - Callback for affinity changes
4083 * @notify: context as to what irq was changed
4084 * @mask: the new affinity mask
4086 * This is a callback function used by the irq_set_affinity_notifier function
4087 * so that we may register to receive changes to the irq affinity masks.
4088 **/
4089 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
4090 const cpumask_t *mask)
4091 {
4092 struct i40e_q_vector *q_vector =
4093 container_of(notify, struct i40e_q_vector, affinity_notify);
4095 cpumask_copy(&q_vector->affinity_mask, mask);
4096 }
4099 * i40e_irq_affinity_release - Callback for affinity notifier release
4100 * @ref: internal core kernel usage
4102 * This is a callback function used by the irq_set_affinity_notifier function
4103 * to inform the current notification subscriber that they will no longer
4104 * receive notifications.
4106 static void i40e_irq_affinity_release(struct kref *ref) {}
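/* The release callback is intentionally empty: q_vector lifetime is
 * managed by the driver itself, so nothing extra must be freed when the
 * IRQ core drops the affinity notifier reference.
 */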
4109 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
4110 * @vsi: the VSI being configured
4111 * @basename: name for the vector
4113 * Allocates MSI-X vectors and requests interrupts from the kernel.
4114 **/
4115 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
4116 {
4117 int q_vectors = vsi->num_q_vectors;
4118 struct i40e_pf *pf = vsi->back;
4119 int base = vsi->base_vector;
4120 int rx_int_idx = 0;
4121 int tx_int_idx = 0;
4122 int vector, err;
4123 int irq_num;
4124 int cpu;
4126 for (vector = 0; vector < q_vectors; vector++) {
4127 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
4129 irq_num = pf->msix_entries[base + vector].vector;
4131 if (q_vector->tx.ring && q_vector->rx.ring) {
4132 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
4133 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
4134 tx_int_idx++;
4135 } else if (q_vector->rx.ring) {
4136 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
4137 "%s-%s-%d", basename, "rx", rx_int_idx++);
4138 } else if (q_vector->tx.ring) {
4139 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
4140 "%s-%s-%d", basename, "tx", tx_int_idx++);
4141 } else {
4142 /* skip this unused q_vector */
4143 continue;
4144 }
4145 err = request_irq(irq_num,
4146 vsi->irq_handler,
4147 0,
4148 q_vector->name,
4149 q_vector);
4150 if (err) {
4151 dev_info(&pf->pdev->dev,
4152 "MSIX request_irq failed, error: %d\n", err);
4153 goto free_queue_irqs;
4154 }
4156 /* register for affinity change notifications */
4157 q_vector->irq_num = irq_num;
4158 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
4159 q_vector->affinity_notify.release = i40e_irq_affinity_release;
4160 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
4161 /* Spread affinity hints out across online CPUs.
4162 *
4163 * get_cpu_mask returns a static constant mask with
4164 * a permanent lifetime so it's ok to pass to
4165 * irq_update_affinity_hint without making a copy.
4166 */
4167 cpu = cpumask_local_spread(q_vector->v_idx, -1);
4168 irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
4169 }
4171 vsi->irqs_ready = true;
4172 return 0;
4174 free_queue_irqs:
4175 while (vector) {
4176 vector--;
4177 irq_num = pf->msix_entries[base + vector].vector;
4178 irq_set_affinity_notifier(irq_num, NULL);
4179 irq_update_affinity_hint(irq_num, NULL);
4180 free_irq(irq_num, &vsi->q_vectors[vector]);
4181 }
4182 return err;
4183 }
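/* The unwind path above mirrors setup in reverse: the notifier and the
 * affinity hint are cleared before free_irq() so no affinity callback can
 * run against a vector that is being torn down.
 */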
4185 /**
4186 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
4187 * @vsi: the VSI being un-configured
4188 **/
4189 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
4190 {
4191 struct i40e_pf *pf = vsi->back;
4192 struct i40e_hw *hw = &pf->hw;
4193 int base = vsi->base_vector;
4194 int i;
4196 /* disable interrupt causation from each queue */
4197 for (i = 0; i < vsi->num_queue_pairs; i++) {
4198 u32 val;
4200 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
4201 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
4202 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
4204 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
4205 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
4206 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
4208 if (!i40e_enabled_xdp_vsi(vsi))
4209 continue;
4210 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
4211 }
4213 /* disable each interrupt */
4214 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
4215 for (i = vsi->base_vector;
4216 i < (vsi->num_q_vectors + vsi->base_vector); i++)
4217 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
4219 i40e_flush(hw);
4220 for (i = 0; i < vsi->num_q_vectors; i++)
4221 synchronize_irq(pf->msix_entries[i + base].vector);
4222 } else {
4223 /* Legacy and MSI mode - this stops all interrupt handling */
4224 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
4225 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
4226 i40e_flush(hw);
4227 synchronize_irq(pf->pdev->irq);
4228 }
4229 }
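/* Masking CTLN/ICR0 only stops new interrupts from being generated; the
 * synchronize_irq() calls above additionally wait for any handler still
 * running on another CPU before the caller tears the rings down.
 */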
4231 /**
4232 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
4233 * @vsi: the VSI being configured
4234 **/
4235 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
4236 {
4237 struct i40e_pf *pf = vsi->back;
4238 int i;
4240 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
4241 for (i = 0; i < vsi->num_q_vectors; i++)
4242 i40e_irq_dynamic_enable(vsi, i);
4243 } else {
4244 i40e_irq_dynamic_enable_icr0(pf);
4245 }
4247 i40e_flush(&pf->hw);
4248 return 0;
4249 }
4251 /**
4252 * i40e_free_misc_vector - Free the vector that handles non-queue events
4253 * @pf: board private structure
4254 **/
4255 static void i40e_free_misc_vector(struct i40e_pf *pf)
4256 {
4257 /* Disable ICR 0 */
4258 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
4259 i40e_flush(&pf->hw);
4261 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
4262 free_irq(pf->msix_entries[0].vector, pf);
4263 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
4264 }
4265 }
4268 * i40e_intr - MSI/Legacy and non-queue interrupt handler
4269 * @irq: interrupt number
4270 * @data: pointer to a q_vector
4272 * This is the handler used for all MSI/Legacy interrupts, and deals
4273 * with both queue and non-queue interrupts. This is also used in
4274 * MSIX mode to handle the non-queue interrupts.
4275 **/
4276 static irqreturn_t i40e_intr(int irq, void *data)
4277 {
4278 struct i40e_pf *pf = (struct i40e_pf *)data;
4279 struct i40e_hw *hw = &pf->hw;
4280 irqreturn_t ret = IRQ_NONE;
4281 u32 icr0, icr0_remaining;
4282 u32 val, ena_mask;
4284 icr0 = rd32(hw, I40E_PFINT_ICR0);
4285 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
4287 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
4288 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
4289 goto enable_intr;
4291 /* if interrupt but no bits showing, must be SWINT */
4292 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
4293 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
4294 pf->sw_int_count++;
4296 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) &&
4297 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
4298 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
4299 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
4300 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
4301 }
4303 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
4304 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
4305 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
4306 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
4308 /* We do not have a way to disarm Queue causes while leaving
4309 * interrupt enabled for all other causes, ideally
4310 * interrupt should be disabled while we are in NAPI but
4311 * this is not a performance path and napi_schedule()
4312 * can deal with rescheduling.
4313 */
4314 if (!test_bit(__I40E_DOWN, pf->state))
4315 napi_schedule_irqoff(&q_vector->napi);
4316 }
4318 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4319 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4320 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
4321 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
4322 }
4324 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
4325 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4326 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
4327 }
4329 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4330 /* disable any further VFLR event notifications */
4331 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
4332 u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4334 reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
4335 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4336 } else {
4337 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
4338 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4339 }
4340 }
4342 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
4343 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4344 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
4345 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
4346 val = rd32(hw, I40E_GLGEN_RSTAT);
4347 val = FIELD_GET(I40E_GLGEN_RSTAT_RESET_TYPE_MASK, val);
4348 if (val == I40E_RESET_CORER) {
4349 pf->corer_count++;
4350 } else if (val == I40E_RESET_GLOBR) {
4351 pf->globr_count++;
4352 } else if (val == I40E_RESET_EMPR) {
4353 pf->empr_count++;
4354 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4355 }
4356 }
4358 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
4359 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
4360 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
4361 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
4362 rd32(hw, I40E_PFHMC_ERRORINFO),
4363 rd32(hw, I40E_PFHMC_ERRORDATA));
4364 }
4366 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4367 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4369 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK)
4370 schedule_work(&pf->ptp_extts0_work);
4372 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
4373 i40e_ptp_tx_hwtstamp(pf);
4375 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4376 }
4378 /* If a critical error is pending we have no choice but to reset the
4379 * device.
4380 * Report and mask out any remaining unexpected interrupts.
4381 */
4382 icr0_remaining = icr0 & ena_mask;
4383 if (icr0_remaining) {
4384 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4385 icr0_remaining);
4386 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4387 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4388 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4389 dev_info(&pf->pdev->dev, "device will be reset\n");
4390 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4391 i40e_service_event_schedule(pf);
4392 }
4393 ena_mask &= ~icr0_remaining;
4394 }
4395 ret = IRQ_HANDLED;
4397 enable_intr:
4398 /* re-enable interrupt causes */
4399 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4400 if (!test_bit(__I40E_DOWN, pf->state) ||
4401 test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4402 i40e_service_event_schedule(pf);
4403 i40e_irq_dynamic_enable_icr0(pf);
4404 }
4406 return ret;
4407 }
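/* ICR0 is read-to-clear, so every cause bit must either be serviced in
 * i40e_intr() or masked out of I40E_PFINT_ICR0_ENA before re-enabling,
 * otherwise the same unhandled cause would re-assert immediately.
 */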
4409 /**
4410 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4411 * @tx_ring: tx ring to clean
4412 * @budget: how many cleans we're allowed
4413 *
4414 * Returns true if there's any budget left (e.g. the clean is finished)
4415 **/
4416 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4417 {
4418 struct i40e_vsi *vsi = tx_ring->vsi;
4419 u16 i = tx_ring->next_to_clean;
4420 struct i40e_tx_buffer *tx_buf;
4421 struct i40e_tx_desc *tx_desc;
4423 tx_buf = &tx_ring->tx_bi[i];
4424 tx_desc = I40E_TX_DESC(tx_ring, i);
4425 i -= tx_ring->count;
4427 do {
4428 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4430 /* if next_to_watch is not set then there is no work pending */
4431 if (!eop_desc)
4432 break;
4434 /* prevent any other reads prior to eop_desc */
4435 smp_rmb();
4437 /* if the descriptor isn't done, no work yet to do */
4438 if (!(eop_desc->cmd_type_offset_bsz &
4439 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4440 break;
4442 /* clear next_to_watch to prevent false hangs */
4443 tx_buf->next_to_watch = NULL;
4445 tx_desc->buffer_addr = 0;
4446 tx_desc->cmd_type_offset_bsz = 0;
4447 /* move past filter desc */
4448 tx_buf++;
4449 tx_desc++;
4450 i++;
4451 if (unlikely(!i)) {
4452 i -= tx_ring->count;
4453 tx_buf = tx_ring->tx_bi;
4454 tx_desc = I40E_TX_DESC(tx_ring, 0);
4455 }
4456 /* unmap skb header data */
4457 dma_unmap_single(tx_ring->dev,
4458 dma_unmap_addr(tx_buf, dma),
4459 dma_unmap_len(tx_buf, len),
4460 DMA_TO_DEVICE);
4461 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4462 kfree(tx_buf->raw_buf);
4464 tx_buf->raw_buf = NULL;
4465 tx_buf->tx_flags = 0;
4466 tx_buf->next_to_watch = NULL;
4467 dma_unmap_len_set(tx_buf, len, 0);
4468 tx_desc->buffer_addr = 0;
4469 tx_desc->cmd_type_offset_bsz = 0;
4471 /* move us past the eop_desc for start of next FD desc */
4472 tx_buf++;
4473 tx_desc++;
4474 i++;
4475 if (unlikely(!i)) {
4476 i -= tx_ring->count;
4477 tx_buf = tx_ring->tx_bi;
4478 tx_desc = I40E_TX_DESC(tx_ring, 0);
4479 }
4481 /* update budget accounting */
4482 budget--;
4483 } while (likely(budget));
4485 i += tx_ring->count;
4486 tx_ring->next_to_clean = i;
4488 if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags))
4489 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4491 return budget > 0;
4492 }
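/* The "budget > 0" result signals that the ring was fully drained within
 * the allowed number of cleans; a false return means the budget ran out
 * and another cleaning pass would be needed.
 */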
4494 /**
4495 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4496 * @irq: interrupt number
4497 * @data: pointer to a q_vector
4498 **/
4499 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4500 {
4501 struct i40e_q_vector *q_vector = data;
4502 struct i40e_vsi *vsi;
4504 if (!q_vector->tx.ring)
4505 return IRQ_HANDLED;
4507 vsi = q_vector->tx.ring->vsi;
4508 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4510 return IRQ_HANDLED;
4511 }
4514 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4515 * @vsi: the VSI being configured
4516 * @v_idx: vector index
4517 * @qp_idx: queue pair index
4518 **/
4519 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4520 {
4521 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4522 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4523 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4525 tx_ring->q_vector = q_vector;
4526 tx_ring->next = q_vector->tx.ring;
4527 q_vector->tx.ring = tx_ring;
4528 q_vector->tx.count++;
4530 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4531 if (i40e_enabled_xdp_vsi(vsi)) {
4532 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4534 xdp_ring->q_vector = q_vector;
4535 xdp_ring->next = q_vector->tx.ring;
4536 q_vector->tx.ring = xdp_ring;
4537 q_vector->tx.count++;
4538 }
4540 rx_ring->q_vector = q_vector;
4541 rx_ring->next = q_vector->rx.ring;
4542 q_vector->rx.ring = rx_ring;
4543 q_vector->rx.count++;
4544 }
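/* Rings hang off a q_vector as singly linked lists threaded through
 * ring->next, pushed at the head; when XDP is enabled the XDP Tx ring
 * therefore sits ahead of the regular Tx ring on the vector's tx list.
 */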
4547 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4548 * @vsi: the VSI being configured
4550 * This function maps descriptor rings to the queue-specific vectors
4551 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4552 * one vector per queue pair, but on a constrained vector budget, we
4553 * group the queue pairs as "efficiently" as possible.
4554 **/
4555 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4556 {
4557 int qp_remaining = vsi->num_queue_pairs;
4558 int q_vectors = vsi->num_q_vectors;
4559 int num_ringpairs;
4560 int v_start = 0;
4561 int qp_idx = 0;
4563 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4564 * group them so there are multiple queues per vector.
4565 * It is also important to go through all the vectors available to be
4566 * sure that if we don't use all the vectors, that the remaining vectors
4567 * are cleared. This is especially important when decreasing the
4568 * number of queues in use.
4569 */
4570 for (; v_start < q_vectors; v_start++) {
4571 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4573 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
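/* DIV_ROUND_UP re-balances the remainder each iteration: for example,
 * 10 queue pairs over 4 vectors come out as 3, 3, 2, 2 (illustrative
 * arithmetic for this loop, not a hardware constraint).
 */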
4575 q_vector->num_ringpairs = num_ringpairs;
4576 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4578 q_vector->rx.count = 0;
4579 q_vector->tx.count = 0;
4580 q_vector->rx.ring = NULL;
4581 q_vector->tx.ring = NULL;
4583 while (num_ringpairs--) {
4584 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4585 qp_idx++;
4586 qp_remaining--;
4587 }
4588 }
4589 }
4592 * i40e_vsi_request_irq - Request IRQ from the OS
4593 * @vsi: the VSI being configured
4594 * @basename: name for the vector
4595 **/
4596 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4597 {
4598 struct i40e_pf *pf = vsi->back;
4599 int err;
4601 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
4602 err = i40e_vsi_request_irq_msix(vsi, basename);
4603 else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags))
4604 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4605 pf->int_name, pf);
4606 else
4607 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4608 pf->int_name, pf);
4610 if (err)
4611 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4613 return err;
4614 }
4616 #ifdef CONFIG_NET_POLL_CONTROLLER
4617 /**
4618 * i40e_netpoll - A Polling 'interrupt' handler
4619 * @netdev: network interface device structure
4620 *
4621 * This is used by netconsole to send skbs without having to re-enable
4622 * interrupts. It's not called while the normal interrupt routine is executing.
4623 **/
4624 static void i40e_netpoll(struct net_device *netdev)
4625 {
4626 struct i40e_netdev_priv *np = netdev_priv(netdev);
4627 struct i40e_vsi *vsi = np->vsi;
4628 struct i40e_pf *pf = vsi->back;
4629 int i;
4631 /* if interface is down do nothing */
4632 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4633 return;
4635 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
4636 for (i = 0; i < vsi->num_q_vectors; i++)
4637 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4638 } else {
4639 i40e_intr(pf->pdev->irq, netdev);
4640 }
4641 }
4642 #endif
4644 #define I40E_QTX_ENA_WAIT_COUNT 50
4647 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4648 * @pf: the PF being configured
4649 * @pf_q: the PF queue
4650 * @enable: enable or disable state of the queue
4652 * This routine will wait for the given Tx queue of the PF to reach the
4653 * enabled or disabled state.
4654 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4655 * multiple retries; else will return 0 in case of success.
4656 **/
4657 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4658 {
4659 int i;
4660 u32 tx_reg;
4662 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4663 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4664 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4665 break;
4667 usleep_range(10, 20);
4668 }
4669 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4670 return -ETIMEDOUT;
4672 return 0;
4673 }
4676 * i40e_control_tx_q - Start or stop a particular Tx queue
4677 * @pf: the PF structure
4678 * @pf_q: the PF queue to configure
4679 * @enable: start or stop the queue
4681 * This function enables or disables a single queue. Note that any delay
4682 * required after the operation is expected to be handled by the caller of
4683 * this function.
4684 **/
4685 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4686 {
4687 struct i40e_hw *hw = &pf->hw;
4688 u32 tx_reg;
4689 int i;
4691 /* warn the TX unit of coming changes */
4692 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4693 if (!enable)
4694 usleep_range(10, 20);
4696 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4697 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4698 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4699 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4700 break;
4701 usleep_range(1000, 2000);
4702 }
4704 /* Skip if the queue is already in the requested state */
4705 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4706 return;
4708 /* turn on/off the queue */
4709 if (enable) {
4710 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4711 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4712 } else {
4713 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4714 }
4716 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4717 }
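/* QENA_REQ is the software request bit and QENA_STAT the hardware state;
 * the poll above waits until the two agree before issuing a new request,
 * and i40e_pf_txq_wait() later confirms the transition completed.
 */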
4719 /**
4720 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4721 * @seid: VSI SEID
4722 * @pf: the PF structure
4723 * @pf_q: the PF queue to configure
4724 * @is_xdp: true if the queue is used for XDP
4725 * @enable: start or stop the queue
4726 **/
4727 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4728 bool is_xdp, bool enable)
4729 {
4730 int ret;
4732 i40e_control_tx_q(pf, pf_q, enable);
4734 /* wait for the change to finish */
4735 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4736 if (ret) {
4737 dev_info(&pf->pdev->dev,
4738 "VSI seid %d %sTx ring %d %sable timeout\n",
4739 seid, (is_xdp ? "XDP " : ""), pf_q,
4740 (enable ? "en" : "dis"));
4741 }
4743 return ret;
4744 }
4747 * i40e_vsi_enable_tx - Start a VSI's rings
4748 * @vsi: the VSI being configured
4749 **/
4750 static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
4751 {
4752 struct i40e_pf *pf = vsi->back;
4753 int i, pf_q, ret = 0;
4755 pf_q = vsi->base_queue;
4756 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4757 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4758 pf_q,
4759 false /*is xdp*/, true);
4760 if (ret)
4761 break;
4763 if (!i40e_enabled_xdp_vsi(vsi))
4764 continue;
4766 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4767 pf_q + vsi->alloc_queue_pairs,
4768 true /*is xdp*/, true);
4769 if (ret)
4770 break;
4771 }
4773 return ret;
4774 }
4776 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4777 * @pf: the PF being configured
4778 * @pf_q: the PF queue
4779 * @enable: enable or disable state of the queue
4781 * This routine will wait for the given Rx queue of the PF to reach the
4782 * enabled or disabled state.
4783 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4784 * multiple retries; else will return 0 in case of success.
4785 **/
4786 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4787 {
4788 int i;
4789 u32 rx_reg;
4791 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4792 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4793 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4794 break;
4796 usleep_range(10, 20);
4797 }
4798 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4799 return -ETIMEDOUT;
4801 return 0;
4802 }
4805 * i40e_control_rx_q - Start or stop a particular Rx queue
4806 * @pf: the PF structure
4807 * @pf_q: the PF queue to configure
4808 * @enable: start or stop the queue
4810 * This function enables or disables a single queue. Note that
4811 * any delay required after the operation is expected to be
4812 * handled by the caller of this function.
4813 **/
4814 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4815 {
4816 struct i40e_hw *hw = &pf->hw;
4817 u32 rx_reg;
4818 int i;
4820 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4821 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4822 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4823 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4824 break;
4825 usleep_range(1000, 2000);
4826 }
4828 /* Skip if the queue is already in the requested state */
4829 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4830 return;
4832 /* turn on/off the queue */
4833 if (enable)
4834 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4835 else
4836 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4838 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4839 }
4842 * i40e_control_wait_rx_q
4843 * @pf: the PF structure
4844 * @pf_q: queue being configured
4845 * @enable: start or stop the rings
4847 * This function enables or disables a single queue along with waiting
4848 * for the change to finish. The caller of this function should handle
4849 * the delays needed in the case of disabling queues.
4850 **/
4851 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4852 {
4853 int ret = 0;
4855 i40e_control_rx_q(pf, pf_q, enable);
4857 /* wait for the change to finish */
4858 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4859 if (ret)
4860 return ret;
4862 return 0;
4863 }
4866 * i40e_vsi_enable_rx - Start a VSI's rings
4867 * @vsi: the VSI being configured
4869 static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
4871 struct i40e_pf *pf = vsi->back;
4872 int i, pf_q, ret = 0;
4874 pf_q = vsi->base_queue;
4875 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4876 ret = i40e_control_wait_rx_q(pf, pf_q, true);
4877 if (ret) {
4878 dev_info(&pf->pdev->dev,
4879 "VSI seid %d Rx ring %d enable timeout\n",
4880 vsi->seid, pf_q);
4881 break;
4882 }
4883 }
4885 return ret;
4886 }
4889 * i40e_vsi_start_rings - Start a VSI's rings
4890 * @vsi: the VSI being configured
4891 **/
4892 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4893 {
4894 int ret = 0;
4896 /* do rx first for enable and last for disable */
4897 ret = i40e_vsi_enable_rx(vsi);
4898 if (ret)
4899 return ret;
4900 ret = i40e_vsi_enable_tx(vsi);
4902 return ret;
4903 }
4905 #define I40E_DISABLE_TX_GAP_MSEC 50
4908 * i40e_vsi_stop_rings - Stop a VSI's rings
4909 * @vsi: the VSI being configured
4910 **/
4911 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4912 {
4913 struct i40e_pf *pf = vsi->back;
4914 int pf_q, err, q_end;
4916 /* When port TX is suspended, don't wait */
4917 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4918 return i40e_vsi_stop_rings_no_wait(vsi);
4920 q_end = vsi->base_queue + vsi->num_queue_pairs;
4921 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4922 i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
4924 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
4925 err = i40e_control_wait_rx_q(pf, pf_q, false);
4926 if (err)
4927 dev_info(&pf->pdev->dev,
4928 "VSI seid %d Rx ring %d disable timeout\n",
4929 vsi->seid, pf_q);
4930 }
4932 msleep(I40E_DISABLE_TX_GAP_MSEC);
4933 pf_q = vsi->base_queue;
4934 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4935 wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
4937 i40e_vsi_wait_queues_disabled(vsi);
4938 }
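/* Ordering note: Rx queues are disabled first, then the
 * I40E_DISABLE_TX_GAP_MSEC pause gives in-flight work time to complete
 * before the Tx enables are cleared and all queues are re-checked.
 */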
4941 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4942 * @vsi: the VSI being shutdown
4944 * This function stops all the rings for a VSI but does not delay to verify
4945 * that rings have been disabled. It is expected that the caller is shutting
4946 * down multiple VSIs at once and will delay together for all the VSIs after
4947 * initiating the shutdown. This is particularly useful for shutting down lots
4948 * of VFs together. Otherwise, a large delay can be incurred while configuring
4949 * each VSI in serial.
4950 **/
4951 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4952 {
4953 struct i40e_pf *pf = vsi->back;
4954 int i, pf_q;
4956 pf_q = vsi->base_queue;
4957 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4958 i40e_control_tx_q(pf, pf_q, false);
4959 i40e_control_rx_q(pf, pf_q, false);
4960 }
4961 }
4964 * i40e_vsi_free_irq - Free the irq association with the OS
4965 * @vsi: the VSI being configured
4966 **/
4967 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4968 {
4969 struct i40e_pf *pf = vsi->back;
4970 struct i40e_hw *hw = &pf->hw;
4971 int base = vsi->base_vector;
4972 u32 val, qp;
4973 int i;
4975 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
4976 if (!vsi->q_vectors)
4977 return;
4979 if (!vsi->irqs_ready)
4980 return;
4982 vsi->irqs_ready = false;
4983 for (i = 0; i < vsi->num_q_vectors; i++) {
4984 int irq_num;
4985 u16 vector;
4987 vector = i + base;
4988 irq_num = pf->msix_entries[vector].vector;
4990 /* free only the irqs that were actually requested */
4991 if (!vsi->q_vectors[i] ||
4992 !vsi->q_vectors[i]->num_ringpairs)
4993 continue;
4995 /* clear the affinity notifier in the IRQ descriptor */
4996 irq_set_affinity_notifier(irq_num, NULL);
4997 /* remove our suggested affinity mask for this IRQ */
4998 irq_update_affinity_hint(irq_num, NULL);
4999 free_irq(irq_num, vsi->q_vectors[i]);
5001 /* Tear down the interrupt queue link list
5002 *
5003 * We know that they come in pairs and always
5004 * the Rx first, then the Tx. To clear the
5005 * link list, stick the EOL value into the
5006 * next_q field of the registers.
5007 */
5008 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
5009 qp = FIELD_GET(I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK,
5010 val);
5011 val |= I40E_QUEUE_END_OF_LIST
5012 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
5013 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
5015 while (qp != I40E_QUEUE_END_OF_LIST) {
5016 u32 next;
5018 val = rd32(hw, I40E_QINT_RQCTL(qp));
5020 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
5021 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
5022 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5023 I40E_QINT_RQCTL_INTEVENT_MASK);
5025 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
5026 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
5028 wr32(hw, I40E_QINT_RQCTL(qp), val);
5030 val = rd32(hw, I40E_QINT_TQCTL(qp));
5032 next = FIELD_GET(I40E_QINT_TQCTL_NEXTQ_INDX_MASK,
5033 val);
5035 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
5036 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
5037 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
5038 I40E_QINT_TQCTL_INTEVENT_MASK);
5040 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
5041 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
5043 wr32(hw, I40E_QINT_TQCTL(qp), val);
5044 qp = next;
5045 }
5046 }
5047 } else {
5048 free_irq(pf->pdev->irq, pf);
5050 val = rd32(hw, I40E_PFINT_LNKLST0);
5051 qp = FIELD_GET(I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK, val);
5052 val |= I40E_QUEUE_END_OF_LIST
5053 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5054 wr32(hw, I40E_PFINT_LNKLST0, val);
5056 val = rd32(hw, I40E_QINT_RQCTL(qp));
5057 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
5058 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
5059 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5060 I40E_QINT_RQCTL_INTEVENT_MASK);
5062 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
5063 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
5065 wr32(hw, I40E_QINT_RQCTL(qp), val);
5067 val = rd32(hw, I40E_QINT_TQCTL(qp));
5069 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
5070 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
5071 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
5072 I40E_QINT_TQCTL_INTEVENT_MASK);
5074 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
5075 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
5077 wr32(hw, I40E_QINT_TQCTL(qp), val);
5078 }
5079 }
5082 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
5083 * @vsi: the VSI being configured
5084 * @v_idx: Index of vector to be freed
5086 * This function frees the memory allocated to the q_vector. In addition if
5087 * NAPI is enabled it will delete any references to the NAPI struct prior
5088 * to freeing the q_vector.
5089 **/
5090 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
5091 {
5092 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
5093 struct i40e_ring *ring;
5095 if (!q_vector)
5096 return;
5098 /* disassociate q_vector from rings */
5099 i40e_for_each_ring(ring, q_vector->tx)
5100 ring->q_vector = NULL;
5102 i40e_for_each_ring(ring, q_vector->rx)
5103 ring->q_vector = NULL;
5105 /* only VSI w/ an associated netdev is set up w/ NAPI */
5106 if (vsi->netdev)
5107 netif_napi_del(&q_vector->napi);
5109 vsi->q_vectors[v_idx] = NULL;
5111 kfree_rcu(q_vector, rcu);
5112 }
5115 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
5116 * @vsi: the VSI being un-configured
5118 * This frees the memory allocated to the q_vectors and
5119 * deletes references to the NAPI struct.
5120 **/
5121 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
5122 {
5123 int v_idx;
5125 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
5126 i40e_free_q_vector(vsi, v_idx);
5127 }
5130 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
5131 * @pf: board private structure
5132 **/
5133 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
5134 {
5135 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
5136 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
5137 pci_disable_msix(pf->pdev);
5138 kfree(pf->msix_entries);
5139 pf->msix_entries = NULL;
5140 kfree(pf->irq_pile);
5141 pf->irq_pile = NULL;
5142 } else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags)) {
5143 pci_disable_msi(pf->pdev);
5144 }
5145 clear_bit(I40E_FLAG_MSI_ENA, pf->flags);
5146 clear_bit(I40E_FLAG_MSIX_ENA, pf->flags);
5147 }
5150 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
5151 * @pf: board private structure
5153 * We go through and clear interrupt specific resources and reset the structure
5154 * to pre-load conditions
5155 **/
5156 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
5157 {
5158 int i;
5160 if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
5161 i40e_free_misc_vector(pf);
5163 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
5164 I40E_IWARP_IRQ_PILE_ID);
5166 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
5167 for (i = 0; i < pf->num_alloc_vsi; i++)
5168 if (pf->vsi[i])
5169 i40e_vsi_free_q_vectors(pf->vsi[i]);
5170 i40e_reset_interrupt_capability(pf);
5171 }
5174 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5175 * @vsi: the VSI being configured
5176 **/
5177 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
5178 {
5179 int q_idx;
5181 if (!vsi->netdev)
5182 return;
5184 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
5185 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
5187 if (q_vector->rx.ring || q_vector->tx.ring)
5188 napi_enable(&q_vector->napi);
5189 }
5190 }
5193 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
5194 * @vsi: the VSI being configured
5195 **/
5196 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
5197 {
5198 int q_idx;
5200 if (!vsi->netdev)
5201 return;
5203 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
5204 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
5206 if (q_vector->rx.ring || q_vector->tx.ring)
5207 napi_disable(&q_vector->napi);
5208 }
5209 }
5212 * i40e_vsi_close - Shut down a VSI
5213 * @vsi: the vsi to be quelled
5214 **/
5215 static void i40e_vsi_close(struct i40e_vsi *vsi)
5216 {
5217 struct i40e_pf *pf = vsi->back;
5218 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
5219 i40e_down(vsi);
5220 i40e_vsi_free_irq(vsi);
5221 i40e_vsi_free_tx_resources(vsi);
5222 i40e_vsi_free_rx_resources(vsi);
5223 vsi->current_netdev_flags = 0;
5224 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
5225 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
5226 set_bit(__I40E_CLIENT_RESET, pf->state);
5227 }
5230 * i40e_quiesce_vsi - Pause a given VSI
5231 * @vsi: the VSI being paused
5232 **/
5233 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
5234 {
5235 if (test_bit(__I40E_VSI_DOWN, vsi->state))
5236 return;
5238 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
5239 if (vsi->netdev && netif_running(vsi->netdev))
5240 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
5241 else
5242 i40e_vsi_close(vsi);
5243 }
5246 * i40e_unquiesce_vsi - Resume a given VSI
5247 * @vsi: the VSI being resumed
5248 **/
5249 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
5250 {
5251 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
5252 return;
5254 if (vsi->netdev && netif_running(vsi->netdev))
5255 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
5256 else
5257 i40e_vsi_open(vsi); /* this clears the DOWN bit */
5258 }
5260 /**
5261 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
5262 * @pf: the PF
5263 **/
5264 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
5265 {
5266 int v;
5268 for (v = 0; v < pf->num_alloc_vsi; v++) {
5269 if (pf->vsi[v])
5270 i40e_quiesce_vsi(pf->vsi[v]);
5271 }
5272 }
5274 /**
5275 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
5276 * @pf: the PF
5277 **/
5278 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
5279 {
5280 int v;
5282 for (v = 0; v < pf->num_alloc_vsi; v++) {
5283 if (pf->vsi[v])
5284 i40e_unquiesce_vsi(pf->vsi[v]);
5285 }
5286 }
5289 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
5290 * @vsi: the VSI being configured
5292 * Wait until all queues on a given VSI have been disabled.
5293 **/
5294 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
5295 {
5296 struct i40e_pf *pf = vsi->back;
5297 int i, pf_q, ret;
5299 pf_q = vsi->base_queue;
5300 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
5301 /* Check and wait for the Tx queue */
5302 ret = i40e_pf_txq_wait(pf, pf_q, false);
5303 if (ret) {
5304 dev_info(&pf->pdev->dev,
5305 "VSI seid %d Tx ring %d disable timeout\n",
5306 vsi->seid, pf_q);
5307 return ret;
5308 }
5310 if (!i40e_enabled_xdp_vsi(vsi))
5311 goto wait_rx;
5313 /* Check and wait for the XDP Tx queue */
5314 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
5315 false);
5316 if (ret) {
5317 dev_info(&pf->pdev->dev,
5318 "VSI seid %d XDP Tx ring %d disable timeout\n",
5319 vsi->seid, pf_q);
5320 return ret;
5321 }
5322 wait_rx:
5323 /* Check and wait for the Rx queue */
5324 ret = i40e_pf_rxq_wait(pf, pf_q, false);
5325 if (ret) {
5326 dev_info(&pf->pdev->dev,
5327 "VSI seid %d Rx ring %d disable timeout\n",
5328 vsi->seid, pf_q);
5329 return ret;
5330 }
5331 }
5333 return 0;
5334 }
5336 #ifdef CONFIG_I40E_DCB
5338 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
5341 * This function waits for the queues to be in disabled state for all the
5342 * VSIs that are managed by this PF.
5343 **/
5344 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
5345 {
5346 int v, ret = 0;
5348 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
5349 if (pf->vsi[v]) {
5350 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
5351 if (ret)
5352 break;
5353 }
5354 }
5356 return ret;
5357 }
5358 #endif
5361 /**
5362 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
5363 * @pf: pointer to PF
5365 * Get TC map for ISCSI PF type that will include iSCSI TC
5366 * and other enabled TCs.
5367 **/
5368 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
5369 {
5370 struct i40e_dcb_app_priority_table app;
5371 struct i40e_hw *hw = &pf->hw;
5372 u8 enabled_tc = 1; /* TC0 is always enabled */
5373 u8 tc, i;
5374 /* Get the iSCSI APP TLV */
5375 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5377 for (i = 0; i < dcbcfg->numapps; i++) {
5378 app = dcbcfg->app[i];
5379 if (app.selector == I40E_APP_SEL_TCPIP &&
5380 app.protocolid == I40E_APP_PROTOID_ISCSI) {
5381 tc = dcbcfg->etscfg.prioritytable[app.priority];
5382 enabled_tc |= BIT(tc);
5383 break;
5384 }
5385 }
5387 return enabled_tc;
5388 }
5391 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
5392 * @dcbcfg: the corresponding DCBx configuration structure
5394 * Return the number of TCs from given DCBx configuration
5395 **/
5396 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5397 {
5398 int i, tc_unused = 0;
5399 u8 num_tc = 0;
5400 u8 ret = 0;
5402 /* Scan the ETS Config Priority Table to find
5403 * traffic class enabled for a given priority
5404 * and create a bitmask of enabled TCs
5405 */
5406 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5407 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
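/* Example: a priority table mapping UPs 0-3 to TC0 and UPs 4-7 to TC1
 * makes num_tc = 0x3 here, which the scan below counts as two contiguous
 * TCs (illustrative values, not read from hardware).
 */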
5409 /* Now scan the bitmask to check for
5410 * contiguous TCs starting with TC0
5411 */
5412 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5413 if (num_tc & BIT(i)) {
5414 if (!tc_unused) {
5415 ret++;
5416 } else {
5417 pr_err("Non-contiguous TC - Disabling DCB\n");
5418 return 1;
5419 }
5420 } else {
5421 tc_unused = 1;
5422 }
5423 }
5425 /* There is always at least TC0 */
5426 if (!ret)
5427 ret = 1;
5429 return ret;
5430 }
5433 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5434 * @dcbcfg: the corresponding DCBx configuration structure
5436 * Query the current DCB configuration and return the number of
5437 * traffic classes enabled from the given DCBX config
5438 **/
5439 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5440 {
5441 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5442 u8 enabled_tc = 1;
5443 u8 i;
5445 for (i = 0; i < num_tc; i++)
5446 enabled_tc |= BIT(i);
5448 return enabled_tc;
5449 }
5452 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5453 * @pf: PF being queried
5455 * Query the current MQPRIO configuration and return the number of
5456 * traffic classes enabled.
5457 **/
5458 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5459 {
5460 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5461 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5462 u8 enabled_tc = 1, i;
5464 for (i = 1; i < num_tc; i++)
5465 enabled_tc |= BIT(i);
5467 return enabled_tc;
5468 }
5470 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5471 * @pf: PF being queried
5473 * Return number of traffic classes enabled for the given PF
5474 **/
5475 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5476 {
5477 struct i40e_hw *hw = &pf->hw;
5478 u8 i, enabled_tc = 1;
5479 u8 num_tc = 0;
5480 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5482 if (i40e_is_tc_mqprio_enabled(pf))
5483 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5485 /* If neither MQPRIO nor DCB is enabled, then always use single TC */
5486 if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags))
5487 return 1;
5489 /* SFP mode will be enabled for all TCs on port */
5490 if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
5491 return i40e_dcb_get_num_tc(dcbcfg);
5493 /* MFP mode return count of enabled TCs for this PF */
5494 if (pf->hw.func_caps.iscsi)
5495 enabled_tc = i40e_get_iscsi_tc_map(pf);
5496 else
5497 return 1; /* Only TC0 */
5499 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5500 if (enabled_tc & BIT(i))
5501 num_tc++;
5502 }
5504 return num_tc;
5505 }
5507 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
5508 * @pf: PF being queried
5510 * Return a bitmap for enabled traffic classes for this PF.
5511 **/
5512 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5513 {
5514 if (i40e_is_tc_mqprio_enabled(pf))
5515 return i40e_mqprio_get_enabled_tc(pf);
5517 /* If neither MQPRIO nor DCB is enabled for this PF then just return
5518 * default TC
5519 */
5520 if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags))
5521 return I40E_DEFAULT_TRAFFIC_CLASS;
5523 /* SFP mode we want PF to be enabled for all TCs */
5524 if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
5525 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5527 /* MFP enabled and iSCSI PF type */
5528 if (pf->hw.func_caps.iscsi)
5529 return i40e_get_iscsi_tc_map(pf);
5531 return I40E_DEFAULT_TRAFFIC_CLASS;
5532 }
5535 * i40e_vsi_get_bw_info - Query VSI BW Information
5536 * @vsi: the VSI being queried
5538 * Returns 0 on success, negative value on failure
5539 **/
5540 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5541 {
5542 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5543 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5544 struct i40e_pf *pf = vsi->back;
5545 struct i40e_hw *hw = &pf->hw;
5546 u32 tc_bw_max;
5547 int ret;
5548 int i;
5550 /* Get the VSI level BW configuration */
5551 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5552 if (ret) {
5553 dev_info(&pf->pdev->dev,
5554 "couldn't get PF vsi bw config, err %pe aq_err %s\n",
5555 ERR_PTR(ret),
5556 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5557 return -EINVAL;
5558 }
5560 /* Get the VSI level BW configuration per TC */
5561 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5562 NULL);
5563 if (ret) {
5564 dev_info(&pf->pdev->dev,
5565 "couldn't get PF vsi ets bw config, err %pe aq_err %s\n",
5566 ERR_PTR(ret),
5567 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5568 return -EINVAL;
5569 }
5571 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5572 dev_info(&pf->pdev->dev,
5573 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5574 bw_config.tc_valid_bits,
5575 bw_ets_config.tc_valid_bits);
5576 /* Still continuing */
5577 }
5579 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5580 vsi->bw_max_quanta = bw_config.max_bw;
5581 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5582 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5583 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5584 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5585 vsi->bw_ets_limit_credits[i] =
5586 le16_to_cpu(bw_ets_config.credits[i]);
5587 /* 3 bits out of 4 for each TC */
5588 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5589 }
5591 return 0;
5592 }
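/* tc_bw_max above packs one 4-bit field per TC into a 32-bit word built
 * from two LE16 halves; only the low 3 bits of each nibble are a valid
 * max-quanta value, hence the "& 0x7" extraction.
 */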
5595 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5596 * @vsi: the VSI being configured
5597 * @enabled_tc: TC bitmap
5598 * @bw_share: BW shared credits per TC
5600 * Returns 0 on success, negative value on failure
5601 **/
5602 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5603 u8 *bw_share)
5604 {
5605 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5606 struct i40e_pf *pf = vsi->back;
5607 int ret;
5608 int i;
5610 /* There is no need to reset BW when mqprio mode is on. */
5611 if (i40e_is_tc_mqprio_enabled(pf))
5612 return 0;
5613 if (!vsi->mqprio_qopt.qopt.hw && !test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
5614 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5615 if (ret)
5616 dev_info(&pf->pdev->dev,
5617 "Failed to reset tx rate for vsi->seid %u\n",
5618 vsi->seid);
5619 return ret;
5620 }
5621 memset(&bw_data, 0, sizeof(bw_data));
5622 bw_data.tc_valid_bits = enabled_tc;
5623 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5624 bw_data.tc_bw_credits[i] = bw_share[i];
5626 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5627 if (ret) {
5628 dev_info(&pf->pdev->dev,
5629 "AQ command Config VSI BW allocation per TC failed = %d\n",
5630 pf->hw.aq.asq_last_status);
5631 return -EINVAL;
5632 }
5634 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5635 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5637 return 0;
5638 }
5641 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5642 * @vsi: the VSI being configured
5643 * @enabled_tc: TC map to be enabled
5645 **/
5646 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5647 {
5648 struct net_device *netdev = vsi->netdev;
5649 struct i40e_pf *pf = vsi->back;
5650 struct i40e_hw *hw = &pf->hw;
5651 u8 netdev_tc = 0;
5652 int i;
5653 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5655 if (!netdev)
5656 return;
5658 if (!enabled_tc) {
5659 netdev_reset_tc(netdev);
5660 return;
5661 }
5663 /* Set up actual enabled TCs on the VSI */
5664 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5665 return;
5667 /* set per TC queues for the VSI */
5668 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5669 /* Only set TC queues for enabled tcs
5670 *
5671 * e.g. For a VSI that has TC0 and TC3 enabled the
5672 * enabled_tc bitmap would be 0x00001001; the driver
5673 * will set the numtc for netdev as 2 that will be
5674 * referenced by the netdev layer as TC 0 and 1.
5675 */
5676 if (vsi->tc_config.enabled_tc & BIT(i))
5677 netdev_set_tc_queue(netdev,
5678 vsi->tc_config.tc_info[i].netdev_tc,
5679 vsi->tc_config.tc_info[i].qcount,
5680 vsi->tc_config.tc_info[i].qoffset);
5681 }
5683 if (i40e_is_tc_mqprio_enabled(pf))
5684 return;
5686 /* Assign UP2TC map for the VSI */
5687 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5688 /* Get the actual TC# for the UP */
5689 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5690 /* Get the mapped netdev TC# for the UP */
5691 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5692 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5693 }
5694 }
5697 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
5698 * @vsi: the VSI being configured
5699 * @ctxt: the ctxt buffer returned from AQ VSI update param command
5701 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5702 struct i40e_vsi_context *ctxt)
5704 /* copy just the sections touched not the entire info
5705 * since not all sections are valid as returned by
5706 * update vsi params
5707 */
5708 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5709 memcpy(&vsi->info.queue_mapping,
5710 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5711 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5712 sizeof(vsi->info.tc_mapping));
5713 }
5716 * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI
5717 * @vsi: the VSI being reconfigured
5718 * @vsi_offset: offset from main VF VSI
5719 **/
5720 int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
5721 {
5722 struct i40e_vsi_context ctxt = {};
5723 struct i40e_pf *pf;
5724 struct i40e_hw *hw;
5725 int ret;
5727 if (!vsi)
5728 return -EINVAL;
5729 pf = vsi->back;
5730 hw = &pf->hw;
5732 ctxt.seid = vsi->seid;
5733 ctxt.pf_num = hw->pf_id;
5734 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset;
5735 ctxt.uplink_seid = vsi->uplink_seid;
5736 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5737 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5738 ctxt.info = vsi->info;
5740 i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc,
5741 false);
5742 if (vsi->reconfig_rss) {
5743 vsi->rss_size = min_t(int, pf->alloc_rss_size,
5744 vsi->num_queue_pairs);
5745 ret = i40e_vsi_config_rss(vsi);
5746 if (ret) {
5747 dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n");
5748 return ret;
5749 }
5750 vsi->reconfig_rss = false;
5751 }
5753 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5754 if (ret) {
5755 dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n",
5756 ERR_PTR(ret),
5757 i40e_aq_str(hw, hw->aq.asq_last_status));
5758 return ret;
5759 }
5760 /* update the local VSI info with updated queue map */
5761 i40e_vsi_update_queue_map(vsi, &ctxt);
5762 vsi->info.valid_sections = 0;
5764 return ret;
5765 }
5768 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5769 * @vsi: VSI to be configured
5770 * @enabled_tc: TC bitmap
5772 * This configures a particular VSI for TCs that are mapped to the
5773 * given TC bitmap. It uses default bandwidth share for TCs across
5774 * VSIs to configure TC for a particular VSI.
5775 *
5777 * It is expected that the VSI queues have been quiesced before calling
5778 * this function.
5779 **/
5780 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5781 {
5782 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5783 struct i40e_pf *pf = vsi->back;
5784 struct i40e_hw *hw = &pf->hw;
5785 struct i40e_vsi_context ctxt;
5786 int ret = 0;
5787 int i;
5789 /* Check if enabled_tc is same as existing or new TCs */
5790 if (vsi->tc_config.enabled_tc == enabled_tc &&
5791 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5792 return ret;
5794 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5795 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5796 if (enabled_tc & BIT(i))
5797 bw_share[i] = 1;
5798 }
5800 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5801 if (ret) {
5802 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5804 dev_info(&pf->pdev->dev,
5805 "Failed configuring TC map %d for VSI %d\n",
5806 enabled_tc, vsi->seid);
5807 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5808 &bw_config, NULL);
5809 if (ret) {
5810 dev_info(&pf->pdev->dev,
5811 "Failed querying vsi bw info, err %pe aq_err %s\n",
5812 ERR_PTR(ret),
5813 i40e_aq_str(hw, hw->aq.asq_last_status));
5814 goto out;
5815 }
5816 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5817 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5819 if (!valid_tc)
5820 valid_tc = bw_config.tc_valid_bits;
5821 /* Always enable TC0, no matter what */
5822 valid_tc |= BIT(0);
5823 dev_info(&pf->pdev->dev,
5824 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5825 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5826 enabled_tc = valid_tc;
5827 }
5829 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5830 if (ret) {
5831 dev_err(&pf->pdev->dev,
5832 "Unable to configure TC map %d for VSI %d\n",
5833 enabled_tc, vsi->seid);
5834 goto out;
5835 }
5836 }
5838 /* Update Queue Pairs Mapping for currently enabled UPs */
5839 ctxt.seid = vsi->seid;
5840 ctxt.pf_num = vsi->back->hw.pf_id;
5841 ctxt.vf_num = 0;
5842 ctxt.uplink_seid = vsi->uplink_seid;
5843 ctxt.info = vsi->info;
5844 if (i40e_is_tc_mqprio_enabled(pf)) {
5845 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5846 if (ret)
5847 goto out;
5848 } else {
5849 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5850 }
5852 /* On destroying the qdisc, reset vsi->rss_size, as number of enabled
5853 * queues changed.
5854 */
5855 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5856 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5857 vsi->num_queue_pairs);
5858 ret = i40e_vsi_config_rss(vsi);
5859 if (ret) {
5860 dev_info(&vsi->back->pdev->dev,
5861 "Failed to reconfig rss for num_queues\n");
5862 return ret;
5863 }
5864 vsi->reconfig_rss = false;
5865 }
5866 if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) {
5867 ctxt.info.valid_sections |=
5868 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5869 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5870 }
5872 /* Update the VSI after updating the VSI queue-mapping
5873 * information
5874 */
5875 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5876 if (ret) {
5877 dev_info(&pf->pdev->dev,
5878 "Update vsi tc config failed, err %pe aq_err %s\n",
5879 ERR_PTR(ret),
5880 i40e_aq_str(hw, hw->aq.asq_last_status));
5881 goto out;
5882 }
5883 /* update the local VSI info with updated queue map */
5884 i40e_vsi_update_queue_map(vsi, &ctxt);
5885 vsi->info.valid_sections = 0;
5887 /* Update current VSI BW information */
5888 ret = i40e_vsi_get_bw_info(vsi);
5889 if (ret) {
5890 dev_info(&pf->pdev->dev,
5891 "Failed updating vsi bw info, err %pe aq_err %s\n",
5892 ERR_PTR(ret),
5893 i40e_aq_str(hw, hw->aq.asq_last_status));
5894 goto out;
5895 }
5897 /* Update the netdev TC setup */
5898 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5899 out:
5900 return ret;
5901 }
5904 * i40e_get_link_speed - Returns link speed for the interface
5905 * @vsi: VSI to be configured
5906 *
5907 **/
5908 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5909 {
5910 struct i40e_pf *pf = vsi->back;
5912 switch (pf->hw.phy.link_info.link_speed) {
5913 case I40E_LINK_SPEED_40GB:
5914 return 40000;
5915 case I40E_LINK_SPEED_25GB:
5916 return 25000;
5917 case I40E_LINK_SPEED_20GB:
5918 return 20000;
5919 case I40E_LINK_SPEED_10GB:
5920 return 10000;
5921 case I40E_LINK_SPEED_1GB:
5922 return 1000;
5923 default:
5924 return -EINVAL;
5925 }
5926 }
5929 * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
5930 * @vsi: Pointer to vsi structure
5931 * @max_tx_rate: max TX rate in bytes to be converted into Mbits
5933 * Helper function to convert units before send to set BW limit
5934 **/
5935 static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
5936 {
5937 if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
5938 dev_warn(&vsi->back->pdev->dev,
5939 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5940 max_tx_rate = I40E_BW_CREDIT_DIVISOR;
5941 } else {
5942 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
5943 }
5945 return max_tx_rate;
5946 }
5949 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5950 * @vsi: VSI to be configured
5951 * @seid: seid of the channel/VSI
5952 * @max_tx_rate: max TX rate to be configured as BW limit
5954 * Helper function to set BW limit for a given VSI
5955 **/
5956 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5957 {
5958 struct i40e_pf *pf = vsi->back;
5959 u64 credits = 0;
5960 int speed = 0;
5961 int ret = 0;
5963 speed = i40e_get_link_speed(vsi);
5964 if (max_tx_rate > speed) {
5965 dev_err(&pf->pdev->dev,
5966 "Invalid max tx rate %llu specified for VSI seid %d.",
5967 max_tx_rate, seid);
5968 return -EINVAL;
5969 }
5970 if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
5971 dev_warn(&pf->pdev->dev,
5972 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5973 max_tx_rate = I40E_BW_CREDIT_DIVISOR;
5974 }
5976 /* Tx rate credits are in values of 50Mbps, 0 is disabled */
5977 credits = max_tx_rate;
5978 do_div(credits, I40E_BW_CREDIT_DIVISOR);
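/* Example: max_tx_rate = 300 (Mbps) yields 300 / 50 = 6 credits, since
 * firmware accounts Tx bandwidth in I40E_BW_CREDIT_DIVISOR (50 Mbps)
 * units (illustrative arithmetic for the conversion above).
 */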
5979 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5980 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5981 if (ret)
5982 dev_err(&pf->pdev->dev,
5983 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n",
5984 max_tx_rate, seid, ERR_PTR(ret),
5985 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5986 return ret;
5987 }
5990 * i40e_remove_queue_channels - Remove queue channels for the TCs
5991 * @vsi: VSI to be configured
5993 * Remove queue channels for the TCs
5994 **/
5995 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5996 {
5997 enum i40e_admin_queue_err last_aq_status;
5998 struct i40e_cloud_filter *cfilter;
5999 struct i40e_channel *ch, *ch_tmp;
6000 struct i40e_pf *pf = vsi->back;
6001 struct hlist_node *node;
6002 int ret, i;
6004 /* Reset rss size that was stored when reconfiguring rss for
6005 * channel VSIs with non-power-of-2 queue count.
6007 vsi->current_rss_size = 0;
6009 /* perform cleanup for channels if they exist */
6010 if (list_empty(&vsi->ch_list))
6011 return;
6013 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
6014 struct i40e_vsi *p_vsi;
6016 list_del(&ch->list);
6017 p_vsi = ch->parent_vsi;
6018 if (!p_vsi || !ch->initialized) {
6019 kfree(ch);
6020 continue;
6021 }
6022 /* Reset queue contexts */
6023 for (i = 0; i < ch->num_queue_pairs; i++) {
6024 struct i40e_ring *tx_ring, *rx_ring;
6025 u16 pf_q;
6027 pf_q = ch->base_queue + i;
6028 tx_ring = vsi->tx_rings[pf_q];
6029 tx_ring->ch = NULL;
6031 rx_ring = vsi->rx_rings[pf_q];
6032 rx_ring->ch = NULL;
6033 }
6035 /* Reset BW configured for this VSI via mqprio */
6036 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
6037 if (ret)
6038 dev_info(&vsi->back->pdev->dev,
6039 "Failed to reset tx rate for ch->seid %u\n",
6040 ch->seid);
6042 /* delete cloud filters associated with this channel */
6043 hlist_for_each_entry_safe(cfilter, node,
6044 &pf->cloud_filter_list, cloud_node) {
6045 if (cfilter->seid != ch->seid)
6046 continue;
6048 hash_del(&cfilter->cloud_node);
6049 if (cfilter->dst_port)
6050 ret = i40e_add_del_cloud_filter_big_buf(vsi,
6051 cfilter,
6052 false);
6053 else
6054 ret = i40e_add_del_cloud_filter(vsi, cfilter,
6055 false);
6056 last_aq_status = pf->hw.aq.asq_last_status;
6057 if (ret)
6058 dev_info(&pf->pdev->dev,
6059 "Failed to delete cloud filter, err %pe aq_err %s\n",
6060 ERR_PTR(ret),
6061 i40e_aq_str(&pf->hw, last_aq_status));
6062 kfree(cfilter);
6063 }
6065 /* delete VSI from FW */
6066 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
6067 NULL);
6068 if (ret)
6069 dev_err(&vsi->back->pdev->dev,
6070 "unable to remove channel (%d) for parent VSI(%d)\n",
6071 ch->seid, p_vsi->seid);
6072 kfree(ch);
6073 }
6074 INIT_LIST_HEAD(&vsi->ch_list);
6075 }
6078 * i40e_get_max_queues_for_channel
6079 * @vsi: ptr to VSI to which channels are associated with
6081 * Helper function which returns max value among the queue counts set on the
6082 * channels/TCs created.
6083 **/
6084 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
6085 {
6086 struct i40e_channel *ch, *ch_tmp;
6087 int max = 0;
6089 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
6090 if (!ch->initialized)
6091 continue;
6092 if (ch->num_queue_pairs > max)
6093 max = ch->num_queue_pairs;
6094 }
6096 return max;
6097 }
6100 * i40e_validate_num_queues - validate num_queues w.r.t channel
6101 * @pf: ptr to PF device
6102 * @num_queues: number of queues
6103 * @vsi: the parent VSI
6104 * @reconfig_rss: indicates should the RSS be reconfigured or not
6106 * This function validates number of queues in the context of new channel
6107 * which is being established and determines if RSS should be reconfigured
6108 * or not for parent VSI.
6109 **/
6110 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
6111 struct i40e_vsi *vsi, bool *reconfig_rss)
6112 {
6113 int max_ch_queues;
6115 if (!reconfig_rss)
6116 return -EINVAL;
6118 *reconfig_rss = false;
6119 if (vsi->current_rss_size) {
6120 if (num_queues > vsi->current_rss_size) {
6121 dev_dbg(&pf->pdev->dev,
6122 "Error: num_queues (%d) > vsi's current_size(%d)\n",
6123 num_queues, vsi->current_rss_size);
6124 return -EINVAL;
6125 } else if ((num_queues < vsi->current_rss_size) &&
6126 (!is_power_of_2(num_queues))) {
6127 dev_dbg(&pf->pdev->dev,
6128 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
6129 num_queues, vsi->current_rss_size);
6130 return -EINVAL;
6131 }
6132 }
6134 if (!is_power_of_2(num_queues)) {
6135 /* Find the max num_queues configured for channel if channel
6136 * exist.
6137 * if channel exist, then enforce 'num_queues' to be more than
6138 * max ever queues configured for channel.
6139 */
6140 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
6141 if (num_queues < max_ch_queues) {
6142 dev_dbg(&pf->pdev->dev,
6143 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
6144 num_queues, max_ch_queues);
6145 return -EINVAL;
6146 }
6147 *reconfig_rss = true;
6148 }
6150 return 0;
6151 }
6154 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
6155 * @vsi: the VSI being setup
6156 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
6158 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
6159 **/
6160 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
6161 {
6162 struct i40e_pf *pf = vsi->back;
6163 u8 seed[I40E_HKEY_ARRAY_SIZE];
6164 struct i40e_hw *hw = &pf->hw;
6165 int local_rss_size;
6166 u8 *lut;
6167 int ret;
6169 if (!vsi->rss_size)
6170 return -EINVAL;
6172 if (rss_size > vsi->rss_size)
6173 return -EINVAL;
6175 local_rss_size = min_t(int, vsi->rss_size, rss_size);
6176 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
6177 if (!lut)
6178 return -ENOMEM;
6180 /* Ignoring user configured lut if there is one */
6181 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
6183 /* Use user configured hash key if there is one, otherwise
6184 * use default.
6185 */
6186 if (vsi->rss_hkey_user)
6187 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
6188 else
6189 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
6191 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
6192 if (ret) {
6193 dev_info(&pf->pdev->dev,
6194 "Cannot set RSS lut, err %pe aq_err %s\n",
6195 ERR_PTR(ret),
6196 i40e_aq_str(hw, hw->aq.asq_last_status));
6197 kfree(lut);
6198 return ret;
6199 }
6200 kfree(lut);
6202 /* Do the update w.r.t. storing rss_size */
6203 if (!vsi->orig_rss_size)
6204 vsi->orig_rss_size = vsi->rss_size;
6205 vsi->current_rss_size = local_rss_size;
6207 return 0;
6208 }
6211 * i40e_channel_setup_queue_map - Setup a channel queue map
6212 * @pf: ptr to PF device
6213 * @ctxt: VSI context structure
6214 * @ch: ptr to channel structure
6216 * Setup queue map for a specific channel
6217 **/
6218 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
6219 struct i40e_vsi_context *ctxt,
6220 struct i40e_channel *ch)
6221 {
6222 u16 qcount, qmap, sections = 0;
6223 u8 offset = 0;
6224 int pow;
6226 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
6227 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
6229 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
6230 ch->num_queue_pairs = qcount;
6232 /* find the next higher power-of-2 of num queue pairs */
6233 pow = ilog2(qcount);
6234 if (!is_power_of_2(qcount))
6235 pow++;
6237 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
6238 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
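/* Example: ch->num_queue_pairs = 6 gives pow = 3, so the TC0 qmap below
 * advertises a contiguous block of 2^3 = 8 queues at offset 0
 * (illustrative numbers for this computation).
 */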
6240 /* Setup queue TC[0].qmap for given VSI context */
6241 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
6243 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
6244 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
6245 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
6246 ctxt->info.valid_sections |= cpu_to_le16(sections);
6247 }
6250 * i40e_add_channel - add a channel by adding VSI
6251 * @pf: ptr to PF device
6252 * @uplink_seid: underlying HW switching element (VEB) ID
6253 * @ch: ptr to channel structure
6255 * Add a channel (VSI) using add_vsi and queue_map
6256 **/
6257 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
6258 struct i40e_channel *ch)
6259 {
6260 struct i40e_hw *hw = &pf->hw;
6261 struct i40e_vsi_context ctxt;
6262 u8 enabled_tc = 0x1; /* TC0 enabled */
6263 int ret;
6265 if (ch->type != I40E_VSI_VMDQ2) {
6266 dev_info(&pf->pdev->dev,
6267 "add new vsi failed, ch->type %d\n", ch->type);
6268 return -EINVAL;
6269 }
6271 memset(&ctxt, 0, sizeof(ctxt));
6272 ctxt.pf_num = hw->pf_id;
6273 ctxt.vf_num = 0;
6274 ctxt.uplink_seid = uplink_seid;
6275 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
6276 if (ch->type == I40E_VSI_VMDQ2)
6277 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6279 if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
6280 ctxt.info.valid_sections |=
6281 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6282 ctxt.info.switch_id =
6283 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6284 }
6286 /* Set queue map for a given VSI context */
6287 i40e_channel_setup_queue_map(pf, &ctxt, ch);
6289 /* Now time to create VSI */
6290 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6291 if (ret) {
6292 dev_info(&pf->pdev->dev,
6293 "add new vsi failed, err %pe aq_err %s\n",
6294 ERR_PTR(ret),
6295 i40e_aq_str(&pf->hw,
6296 pf->hw.aq.asq_last_status));
6297 return -ENOENT;
6298 }
6300 /* Success, update channel, set enabled_tc only if the channel
6301 * is not a macvlan
6302 */
6303 ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
6304 ch->seid = ctxt.seid;
6305 ch->vsi_number = ctxt.vsi_number;
6306 ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
6308 /* copy just the sections touched not the entire info
6309 * since not all sections are valid as returned by
6310 * add vsi
6311 */
6312 ch->info.mapping_flags = ctxt.info.mapping_flags;
6313 memcpy(&ch->info.queue_mapping,
6314 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
6315 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
6316 sizeof(ctxt.info.tc_mapping));
6318 return 0;
6319 }
6321 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
6324 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
6328 memset(&bw_data, 0, sizeof(bw_data));
6329 bw_data.tc_valid_bits = ch->enabled_tc;
6330 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6331 bw_data.tc_bw_credits[i] = bw_share[i];
6333 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
6334 &bw_data, NULL);
6335 if (ret) {
6336 dev_info(&vsi->back->pdev->dev,
6337 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
6338 vsi->back->hw.aq.asq_last_status, ch->seid);
6342 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6343 ch->info.qs_handle[i] = bw_data.qs_handles[i];
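/* Illustrative sketch (not part of the driver): callers of
 * i40e_channel_config_bw() hand every enabled TC the same single relative
 * credit, which the firmware normalizes into an equal bandwidth share. A
 * hypothetical helper that builds such a table from a TC bitmap:
 */
static inline void example_equal_bw_share(u8 enabled_tc,
					  u8 bw_share[I40E_MAX_TRAFFIC_CLASS])
{
	int i;

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_share[i] = (enabled_tc & BIT(i)) ? 1 : 0;
}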
6349 * i40e_channel_config_tx_ring - config TX ring associated with new channel
6350 * @pf: ptr to PF device
6351 * @vsi: the VSI being setup
6352 * @ch: ptr to channel structure
6354 * Configure TX rings associated with the channel (VSI), since its queues
6355 * are borrowed from the parent VSI.
6356 **/
6357 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
6358 struct i40e_vsi *vsi,
6359 struct i40e_channel *ch)
6361 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
6365 /* Enable ETS TCs with equal BW Share for now across all VSIs */
6366 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6367 if (ch->enabled_tc & BIT(i))
6368 bw_share[i] = 1;
6369 }
6371 /* configure BW for new VSI */
6372 ret = i40e_channel_config_bw(vsi, ch, bw_share);
6374 dev_info(&vsi->back->pdev->dev,
6375 "Failed configuring TC map %d for channel (seid %u)\n",
6376 ch->enabled_tc, ch->seid);
6380 for (i = 0; i < ch->num_queue_pairs; i++) {
6381 struct i40e_ring *tx_ring, *rx_ring;
6384 pf_q = ch->base_queue + i;
6386 /* Get to TX ring ptr of main VSI, for re-setup TX queue
6387 * context
6388 */
6389 tx_ring = vsi->tx_rings[pf_q];
6392 /* Get the RX ring ptr */
6393 rx_ring = vsi->rx_rings[pf_q];
6401 * i40e_setup_hw_channel - setup new channel
6402 * @pf: ptr to PF device
6403 * @vsi: the VSI being setup
6404 * @ch: ptr to channel structure
6405 * @uplink_seid: underlying HW switching element (VEB) ID
6406 * @type: type of channel to be created (VMDq2/VF)
6408 * Setup new channel (VSI) based on specified type (VMDq2/VF)
6409 * and configures TX rings accordingly
6411 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
6412 struct i40e_vsi *vsi,
6413 struct i40e_channel *ch,
6414 u16 uplink_seid, u8 type)
6418 ch->initialized = false;
6419 ch->base_queue = vsi->next_base_queue;
6422 /* Proceed with creation of channel (VMDq2) VSI */
6423 ret = i40e_add_channel(pf, uplink_seid, ch);
6425 dev_info(&pf->pdev->dev,
6426 "failed to add_channel using uplink_seid %u\n",
6431 /* Mark the successful creation of channel */
6432 ch->initialized = true;
6434 /* Reconfigure TX queues using QTX_CTL register */
6435 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6437 dev_info(&pf->pdev->dev,
6438 "failed to configure TX rings for channel %u\n",
6443 /* update 'next_base_queue' */
6444 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6445 dev_dbg(&pf->pdev->dev,
6446 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6447 ch->seid, ch->vsi_number, ch->stat_counter_idx,
6448 ch->num_queue_pairs,
6449 vsi->next_base_queue);
6454 * i40e_setup_channel - setup new channel using uplink element
6455 * @pf: ptr to PF device
6456 * @vsi: pointer to the VSI to set up the channel within
6457 * @ch: ptr to channel structure
6459 * Setup new channel (VSI) based on specified type (VMDq2/VF)
6460 * and uplink switching element (uplink_seid)
6462 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6463 struct i40e_channel *ch)
6469 if (vsi->type == I40E_VSI_MAIN) {
6470 vsi_type = I40E_VSI_VMDQ2;
6471 } else {
6472 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6473 vsi->type);
6474 return false;
6477 /* underlying switching element */
6478 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6480 /* create channel (VSI), configure TX rings */
6481 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6482 if (ret) {
6483 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6484 return false;
6485 }
6487 return ch->initialized;
6491 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6492 * @vsi: ptr to VSI which has PF backing
6494 * Sets up the switch mode correctly if it needs to be changed, restricting
6495 * it to the allowed modes.
6497 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6500 struct i40e_pf *pf = vsi->back;
6501 struct i40e_hw *hw = &pf->hw;
6504 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6508 if (hw->dev_caps.switch_mode) {
6509 /* if switch mode is set, support mode2 (non-tunneled for
6510 * cloud filter) for now
6512 u32 switch_mode = hw->dev_caps.switch_mode &
6513 I40E_SWITCH_MODE_MASK;
6514 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6515 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6516 return 0;
6517 dev_err(&pf->pdev->dev,
6518 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6519 hw->dev_caps.switch_mode);
6524 /* Set Bit 7 to be valid */
6525 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6527 /* Set L4type for TCP support */
6528 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6530 /* Set cloud filter mode */
6531 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6533 /* Prep mode field for set_switch_config */
6534 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6535 pf->last_sw_conf_valid_flags,
6537 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6538 dev_err(&pf->pdev->dev,
6539 "couldn't set switch config bits, err %pe aq_err %s\n",
6542 hw->aq.asq_last_status));
6548 * i40e_create_queue_channel - function to create channel
6549 * @vsi: VSI to be configured
6550 * @ch: ptr to channel (it contains channel specific params)
6552 * This function creates a channel (VSI) using the num_queues specified by
6553 * the user and reconfigures RSS if needed.
6555 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6556 struct i40e_channel *ch)
6558 struct i40e_pf *pf = vsi->back;
6565 if (!ch->num_queue_pairs) {
6566 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6567 ch->num_queue_pairs);
6571 /* validate user requested num_queues for channel */
6572 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6573 &reconfig_rss);
6574 if (err) {
6575 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6576 ch->num_queue_pairs);
6580 /* By default we are in VEPA mode; if this is the first VF/VMDq
6581 * VSI to be added, switch to VEB mode.
6582 */
6584 if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
6585 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
6587 if (vsi->type == I40E_VSI_MAIN) {
6588 if (i40e_is_tc_mqprio_enabled(pf))
6589 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
6591 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
6593 /* From now on, the main VSI's queue count will be the value
6594 * of TC0's queue count
6595 */
6598 /* By this time, vsi->cnt_q_avail shall be set to non-zero and
6599 * it should be more than num_queues
6601 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6602 dev_dbg(&pf->pdev->dev,
6603 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6604 vsi->cnt_q_avail, ch->num_queue_pairs);
6608 /* reconfig_rss only if vsi type is MAIN_VSI */
6609 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6610 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6612 dev_info(&pf->pdev->dev,
6613 "Error: unable to reconfig rss for num_queues (%u)\n",
6614 ch->num_queue_pairs);
6619 if (!i40e_setup_channel(pf, vsi, ch)) {
6620 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6624 dev_info(&pf->pdev->dev,
6625 "Setup channel (id:%u) utilizing num_queues %d\n",
6626 ch->seid, ch->num_queue_pairs);
6628 /* configure VSI for BW limit */
6629 if (ch->max_tx_rate) {
6630 u64 credits = ch->max_tx_rate;
6632 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6635 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6636 dev_dbg(&pf->pdev->dev,
6637 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6643 /* in case of VF, this will be main SRIOV VSI */
6644 ch->parent_vsi = vsi;
6646 /* and update main_vsi's count for queue_available to use */
6647 vsi->cnt_q_avail -= ch->num_queue_pairs;
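/* Illustrative sketch (not part of the driver): the rate configured above
 * is accounted in 50 Mbps credits (I40E_BW_CREDIT_DIVISOR), so a 500 Mbps
 * cap becomes 10 credits. A hypothetical conversion helper using the same
 * do_div() idiom as the code above:
 */
static inline u64 example_rate_to_credits(u64 max_tx_rate)
{
	do_div(max_tx_rate, I40E_BW_CREDIT_DIVISOR);	/* 50 Mbps units */
	return max_tx_rate;
}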
6653 * i40e_configure_queue_channels - Add queue channel for the given TCs
6654 * @vsi: VSI to be configured
6656 * Configures queue channel mapping to the given TCs
6658 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6660 struct i40e_channel *ch;
6664 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6665 vsi->tc_seid_map[0] = vsi->seid;
6666 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6667 if (vsi->tc_config.enabled_tc & BIT(i)) {
6668 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6674 INIT_LIST_HEAD(&ch->list);
6675 ch->num_queue_pairs =
6676 vsi->tc_config.tc_info[i].qcount;
6677 ch->base_queue =
6678 vsi->tc_config.tc_info[i].qoffset;
6680 /* Bandwidth limit through the tc interface is in bytes/s;
6681 * convert it to Mbps
6682 */
6683 max_rate = vsi->mqprio_qopt.max_rate[i];
6684 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6685 ch->max_tx_rate = max_rate;
6687 list_add_tail(&ch->list, &vsi->ch_list);
6689 ret = i40e_create_queue_channel(vsi, ch);
6690 if (ret) {
6691 dev_err(&vsi->back->pdev->dev,
6692 "Failed creating queue channel with TC%d: queues %d\n",
6693 i, ch->num_queue_pairs);
6696 vsi->tc_seid_map[i] = ch->seid;
6700 /* reset to reconfigure TX queue contexts */
6701 i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true);
6705 i40e_remove_queue_channels(vsi);
6710 * i40e_veb_config_tc - Configure TCs for given VEB
6711 * @veb: the VEB being configured
6712 * @enabled_tc: TC bitmap
6714 * Configures given TC bitmap for VEB (switching) element
6716 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6718 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6719 struct i40e_pf *pf = veb->pf;
6723 /* No TCs or already enabled TCs just return */
6724 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6727 bw_data.tc_valid_bits = enabled_tc;
6728 /* bw_data.absolute_credits is not set (relative) */
6730 /* Enable ETS TCs with equal BW Share for now */
6731 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6732 if (enabled_tc & BIT(i))
6733 bw_data.tc_bw_share_credits[i] = 1;
6736 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6737 &bw_data, NULL);
6738 if (ret) {
6739 dev_info(&pf->pdev->dev,
6740 "VEB bw config failed, err %pe aq_err %s\n",
6742 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6746 /* Update the BW information */
6747 ret = i40e_veb_get_bw_info(veb);
6748 if (ret) {
6749 dev_info(&pf->pdev->dev,
6750 "Failed getting veb bw config, err %pe aq_err %s\n",
6752 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6759 #ifdef CONFIG_I40E_DCB
6761 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6762 * @pf: pointer to the PF struct
6763 *
6764 * Reconfigure VEB/VSIs on a given PF. It is assumed that
6765 * the caller has quiesced all the VSIs before calling
6766 * this function.
6767 **/
6768 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6774 /* Enable the TCs available on PF to all VEBs */
6775 tc_map = i40e_pf_get_tc_map(pf);
6776 if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
6779 for (v = 0; v < I40E_MAX_VEB; v++) {
6782 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6783 if (ret) {
6784 dev_info(&pf->pdev->dev,
6785 "Failed configuring TC for VEB seid=%d\n",
6786 pf->veb[v]->seid);
6787 /* Will try to configure as many components */
6791 /* Update each VSI */
6792 for (v = 0; v < pf->num_alloc_vsi; v++) {
6796 /* - Enable all TCs for the LAN VSI
6797 * - For all others keep them at TC0 for now
6799 if (v == pf->lan_vsi)
6800 tc_map = i40e_pf_get_tc_map(pf);
6802 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6804 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6805 if (ret) {
6806 dev_info(&pf->pdev->dev,
6807 "Failed configuring TC for VSI seid=%d\n",
6808 pf->vsi[v]->seid);
6809 /* Will try to configure as many components */
6811 /* Re-configure VSI vectors based on updated TC map */
6812 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6813 if (pf->vsi[v]->netdev)
6814 i40e_dcbnl_set_all(pf->vsi[v]);
6820 * i40e_resume_port_tx - Resume port Tx
6821 * @pf: PF struct
6822 *
6823 * Resume a port's Tx and issue a PF reset in case of failure to
6824 * resume.
6825 **/
6826 static int i40e_resume_port_tx(struct i40e_pf *pf)
6828 struct i40e_hw *hw = &pf->hw;
6831 ret = i40e_aq_resume_port_tx(hw, NULL);
6833 dev_info(&pf->pdev->dev,
6834 "Resume Port Tx failed, err %pe aq_err %s\n",
6836 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6837 /* Schedule PF reset to recover */
6838 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6839 i40e_service_event_schedule(pf);
6846 * i40e_suspend_port_tx - Suspend port Tx
6847 * @pf: PF struct
6848 *
6849 * Suspend a port's Tx and issue a PF reset in case of failure.
6850 **/
6851 static int i40e_suspend_port_tx(struct i40e_pf *pf)
6853 struct i40e_hw *hw = &pf->hw;
6856 ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
6858 dev_info(&pf->pdev->dev,
6859 "Suspend Port Tx failed, err %pe aq_err %s\n",
6861 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6862 /* Schedule PF reset to recover */
6863 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6864 i40e_service_event_schedule(pf);
6871 * i40e_hw_set_dcb_config - Program new DCBX settings into HW
6872 * @pf: PF being configured
6873 * @new_cfg: New DCBX configuration
6875 * Program DCB settings into HW and reconfigure VEB/VSIs on
6876 * given PF. Uses "Set LLDP MIB" AQC to program the hardware.
6878 static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
6879 struct i40e_dcbx_config *new_cfg)
6881 struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config;
6884 /* Check if need reconfiguration */
6885 if (!memcmp(new_cfg, old_cfg, sizeof(*new_cfg))) { /* compare contents, not pointers */
6886 dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n");
6890 /* Config change disable all VSIs */
6891 i40e_pf_quiesce_all_vsi(pf);
6893 /* Copy the new config to the current config */
6894 *old_cfg = *new_cfg;
6895 old_cfg->etsrec = old_cfg->etscfg;
6896 ret = i40e_set_dcb_config(&pf->hw);
6898 dev_info(&pf->pdev->dev,
6899 "Set DCB Config failed, err %pe aq_err %s\n",
6901 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6905 /* Changes in configuration update VEB/VSI */
6906 i40e_dcb_reconfigure(pf);
6908 /* In case of reset do not try to resume anything */
6909 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
6910 /* Re-start the VSIs if disabled */
6911 ret = i40e_resume_port_tx(pf);
6912 /* In case of error no point in resuming VSIs */
6915 i40e_pf_unquiesce_all_vsi(pf);
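/* Illustrative sketch (not part of the driver): the "no change" test at the
 * top of i40e_hw_set_dcb_config() must compare the pointed-to structures;
 * memcmp(&new_cfg, &old_cfg, ...) would only compare the local pointer
 * variables. A hypothetical helper making the intended check explicit:
 */
static inline bool example_dcb_cfg_changed(const struct i40e_dcbx_config *a,
					   const struct i40e_dcbx_config *b)
{
	/* byte-wise comparison of the configuration contents */
	return memcmp(a, b, sizeof(*a)) != 0;
}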
6922 * i40e_hw_dcb_config - Program new DCBX settings into HW
6923 * @pf: PF being configured
6924 * @new_cfg: New DCBX configuration
6926 * Program DCB settings into HW and reconfigure VEB/VSIs on
6929 int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
6931 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
6932 u8 prio_type[I40E_MAX_TRAFFIC_CLASS] = {0};
6933 u32 mfs_tc[I40E_MAX_TRAFFIC_CLASS];
6934 struct i40e_dcbx_config *old_cfg;
6935 u8 mode[I40E_MAX_TRAFFIC_CLASS];
6936 struct i40e_rx_pb_config pb_cfg;
6937 struct i40e_hw *hw = &pf->hw;
6938 u8 num_ports = hw->num_ports;
6946 dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n");
6947 /* Un-pack information to Program ETS HW via shared API
6948 * numtc, tcmap
6949 * LLTC map
6950 * ETS/NON-ETS arbiter mode
6951 * max exponent (credit refills)
6952 * Total number of ports
6953 * PFC priority bit-map
6954 * Priority Table
6955 * BW % per TC
6956 * Arbiter mode between UPs sharing same TC
6957 * TSA table (ETS or non-ETS)
6958 * EEE enabled or not
6959 * MFS TC table
6960 */
6962 new_numtc = i40e_dcb_get_num_tc(new_cfg);
6964 memset(&ets_data, 0, sizeof(ets_data));
6965 for (i = 0; i < new_numtc; i++) {
6966 tc_map |= BIT(i);
6967 switch (new_cfg->etscfg.tsatable[i]) {
6968 case I40E_IEEE_TSA_ETS:
6969 prio_type[i] = I40E_DCB_PRIO_TYPE_ETS;
6970 ets_data.tc_bw_share_credits[i] =
6971 new_cfg->etscfg.tcbwtable[i];
6972 break;
6973 case I40E_IEEE_TSA_STRICT:
6974 prio_type[i] = I40E_DCB_PRIO_TYPE_STRICT;
6975 lltc_map |= BIT(i);
6976 ets_data.tc_bw_share_credits[i] =
6977 I40E_DCB_STRICT_PRIO_CREDITS;
6978 break;
6979 default:
6980 /* Invalid TSA type */
6981 need_reconfig = false;
6982 return -EINVAL;
6986 old_cfg = &hw->local_dcbx_config;
6987 /* Check if need reconfiguration */
6988 need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg);
6990 /* If needed, enable/disable frame tagging, disable all VSIs
6991 * and suspend port tx
6993 if (need_reconfig) {
6994 /* Enable DCB tagging only when more than one TC */
6995 if (i40e_dcb_get_num_tc(new_cfg) > 1)
6996 set_bit(I40E_FLAG_DCB_ENA, pf->flags);
6997 else
6998 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
7000 set_bit(__I40E_PORT_SUSPENDED, pf->state);
7001 /* Reconfiguration needed quiesce all VSIs */
7002 i40e_pf_quiesce_all_vsi(pf);
7003 ret = i40e_suspend_port_tx(pf);
7008 /* Configure Port ETS Tx Scheduler */
7009 ets_data.tc_valid_bits = tc_map;
7010 ets_data.tc_strict_priority_flags = lltc_map;
7011 ret = i40e_aq_config_switch_comp_ets
7012 (hw, pf->mac_seid, &ets_data,
7013 i40e_aqc_opc_modify_switching_comp_ets, NULL);
7015 dev_info(&pf->pdev->dev,
7016 "Modify Port ETS failed, err %pe aq_err %s\n",
7018 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7022 /* Configure Rx ETS HW */
7023 memset(&mode, I40E_DCB_ARB_MODE_ROUND_ROBIN, sizeof(mode));
7024 i40e_dcb_hw_set_num_tc(hw, new_numtc);
7025 i40e_dcb_hw_rx_fifo_config(hw, I40E_DCB_ARB_MODE_ROUND_ROBIN,
7026 I40E_DCB_ARB_MODE_STRICT_PRIORITY,
7027 I40E_DCB_DEFAULT_MAX_EXPONENT,
7028 lltc_map);
7029 i40e_dcb_hw_rx_cmd_monitor_config(hw, new_numtc, num_ports);
7030 i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode,
7031 prio_type);
7032 i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable,
7033 new_cfg->etscfg.prioritytable);
7034 i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable);
7036 /* Configure Rx Packet Buffers in HW */
7037 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
7038 mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
7039 mfs_tc[i] += I40E_PACKET_HDR_PAD;
7042 i40e_dcb_hw_calculate_pool_sizes(hw, num_ports,
7043 false, new_cfg->pfc.pfcenable,
7044 mfs_tc, &pb_cfg);
7045 i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg);
7047 /* Update the local Rx Packet buffer config */
7048 pf->pb_cfg = pb_cfg;
7050 /* Inform the FW about changes to DCB configuration */
7051 ret = i40e_aq_dcb_updated(&pf->hw, NULL);
7053 dev_info(&pf->pdev->dev,
7054 "DCB Updated failed, err %pe aq_err %s\n",
7056 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7060 /* Update the port DCBx configuration */
7061 *old_cfg = *new_cfg;
7063 /* Changes in configuration update VEB/VSI */
7064 i40e_dcb_reconfigure(pf);
7066 /* Re-start the VSIs if disabled */
7067 if (need_reconfig) {
7068 ret = i40e_resume_port_tx(pf);
7070 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
7071 /* In case of error no point in resuming VSIs */
7075 /* Wait for the PF's queues to be disabled */
7076 ret = i40e_pf_wait_queues_disabled(pf);
7077 if (ret) {
7078 /* Schedule PF reset to recover */
7079 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
7080 i40e_service_event_schedule(pf);
7081 } else {
7083 i40e_pf_unquiesce_all_vsi(pf);
7084 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
7085 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
7087 /* registers are set, let's apply */
7088 if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps))
7089 ret = i40e_hw_set_dcb_config(pf, new_cfg);
7097 * i40e_dcb_sw_default_config - Set default DCB configuration when DCB in SW
7098 * @pf: PF being queried
7100 * Set default DCB configuration in case DCB is to be done in SW.
7102 int i40e_dcb_sw_default_config(struct i40e_pf *pf)
7104 struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config;
7105 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
7106 struct i40e_hw *hw = &pf->hw;
7109 if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps)) {
7110 /* Update the local cached instance with TC0 ETS */
7111 memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
7112 pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
7113 pf->tmp_cfg.etscfg.maxtcs = 0;
7114 pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
7115 pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
7116 pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING;
7117 pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
7118 /* FW needs one App to configure HW */
7119 pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS;
7120 pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE;
7121 pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO;
7122 pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE;
7124 return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg);
7127 memset(&ets_data, 0, sizeof(ets_data));
7128 ets_data.tc_valid_bits = I40E_DEFAULT_TRAFFIC_CLASS; /* TC0 only */
7129 ets_data.tc_strict_priority_flags = 0; /* ETS */
7130 ets_data.tc_bw_share_credits[0] = I40E_IEEE_DEFAULT_ETS_TCBW; /* 100% to TC0 */
7132 /* Enable ETS on the Physical port */
7133 err = i40e_aq_config_switch_comp_ets
7134 (hw, pf->mac_seid, &ets_data,
7135 i40e_aqc_opc_enable_switching_comp_ets, NULL);
7137 dev_info(&pf->pdev->dev,
7138 "Enable Port ETS failed, err %pe aq_err %s\n",
7140 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7145 /* Update the local cached instance with TC0 ETS */
7146 dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
7147 dcb_cfg->etscfg.cbs = 0;
7148 dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
7149 dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
7156 * i40e_init_pf_dcb - Initialize DCB configuration
7157 * @pf: PF being configured
7159 * Query the current DCB configuration and cache it
7160 * in the hardware structure
7162 static int i40e_init_pf_dcb(struct i40e_pf *pf)
7164 struct i40e_hw *hw = &pf->hw;
7167 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable.
7168 * Also do not enable DCBx if the FW LLDP agent is disabled.
7169 */
7170 if (test_bit(I40E_HW_CAP_NO_DCB_SUPPORT, pf->hw.caps)) {
7171 dev_info(&pf->pdev->dev, "DCB is not supported.\n");
7175 if (test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) {
7176 dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
7177 err = i40e_dcb_sw_default_config(pf);
7178 if (err) {
7179 dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n");
7180 goto out;
7181 }
7182 dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n");
7183 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
7184 DCB_CAP_DCBX_VER_IEEE;
7185 /* at init capable but disabled */
7186 set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
7187 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
7190 err = i40e_init_dcb(hw, true);
7191 if (!err) {
7192 /* Device/Function is not DCBX capable */
7193 if ((!hw->func_caps.dcb) ||
7194 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
7195 dev_info(&pf->pdev->dev,
7196 "DCBX offload is not supported or is disabled for this PF.\n");
7197 } else {
7198 /* When status is not DISABLED then DCBX is running in FW */
7199 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
7200 DCB_CAP_DCBX_VER_IEEE;
7202 set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
7203 /* Enable DCB tagging only when more than one TC,
7204 * or explicitly disable if only one TC
7205 */
7206 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
7207 set_bit(I40E_FLAG_DCB_ENA, pf->flags);
7209 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
7210 dev_dbg(&pf->pdev->dev,
7211 "DCBX offload is supported for this PF.\n");
7213 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
7214 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
7215 set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags);
7216 } else {
7217 dev_info(&pf->pdev->dev,
7218 "Query for DCB configuration failed, err %pe aq_err %s\n",
7219 ERR_PTR(err),
7220 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7226 #endif /* CONFIG_I40E_DCB */
7229 * i40e_print_link_message - print link up or down
7230 * @vsi: the VSI for which link needs a message
7231 * @isup: true if link is up, false otherwise
7233 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
7235 enum i40e_aq_link_speed new_speed;
7236 struct i40e_pf *pf = vsi->back;
7237 char *speed = "Unknown";
7238 char *fc = "Unknown";
7243 if (isup)
7244 new_speed = pf->hw.phy.link_info.link_speed;
7245 else
7246 new_speed = I40E_LINK_SPEED_UNKNOWN;
7248 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
7249 return;
7250 vsi->current_isup = isup;
7251 vsi->current_speed = new_speed;
7253 netdev_info(vsi->netdev, "NIC Link is Down\n");
7257 /* Warn user if link speed on NPAR enabled partition is not at
7258 * least 10GB
7259 */
7260 if (pf->hw.func_caps.npar_enable &&
7261 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
7262 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
7263 netdev_warn(vsi->netdev,
7264 "The partition detected link speed that is less than 10Gbps\n");
7266 switch (pf->hw.phy.link_info.link_speed) {
7267 case I40E_LINK_SPEED_40GB:
7270 case I40E_LINK_SPEED_20GB:
7273 case I40E_LINK_SPEED_25GB:
7276 case I40E_LINK_SPEED_10GB:
7279 case I40E_LINK_SPEED_5GB:
7282 case I40E_LINK_SPEED_2_5GB:
7285 case I40E_LINK_SPEED_1GB:
7288 case I40E_LINK_SPEED_100MB:
7295 switch (pf->hw.fc.current_mode) {
7299 case I40E_FC_TX_PAUSE:
7302 case I40E_FC_RX_PAUSE:
7310 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
7315 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7316 an = "True";
7318 if (pf->hw.phy.link_info.fec_info &
7319 I40E_AQ_CONFIG_FEC_KR_ENA)
7320 fec = "CL74 FC-FEC/BASE-R";
7321 else if (pf->hw.phy.link_info.fec_info &
7322 I40E_AQ_CONFIG_FEC_RS_ENA)
7323 fec = "CL108 RS-FEC";
7325 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
7326 * both RS and FC are requested
7327 */
7328 if (vsi->back->hw.phy.link_info.req_fec_info &
7329 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
7330 if (vsi->back->hw.phy.link_info.req_fec_info &
7331 I40E_AQ_REQUEST_FEC_RS)
7332 req_fec = "CL108 RS-FEC";
7334 req_fec = "CL74 FC-FEC/BASE-R";
7336 netdev_info(vsi->netdev,
7337 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7338 speed, req_fec, fec, an, fc);
7339 } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
7344 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7345 an = "True";
7347 if (pf->hw.phy.link_info.fec_info &
7348 I40E_AQ_CONFIG_FEC_KR_ENA)
7349 fec = "CL74 FC-FEC/BASE-R";
7351 if (pf->hw.phy.link_info.req_fec_info &
7352 I40E_AQ_REQUEST_FEC_KR)
7353 req_fec = "CL74 FC-FEC/BASE-R";
7355 netdev_info(vsi->netdev,
7356 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7357 speed, req_fec, fec, an, fc);
7359 netdev_info(vsi->netdev,
7360 "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
7367 * i40e_up_complete - Finish the last steps of bringing up a connection
7368 * @vsi: the VSI being configured
7370 static int i40e_up_complete(struct i40e_vsi *vsi)
7372 struct i40e_pf *pf = vsi->back;
7375 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
7376 i40e_vsi_configure_msix(vsi);
7377 else
7378 i40e_configure_msi_and_legacy(vsi);
7381 err = i40e_vsi_start_rings(vsi);
7385 clear_bit(__I40E_VSI_DOWN, vsi->state);
7386 i40e_napi_enable_all(vsi);
7387 i40e_vsi_enable_irq(vsi);
7389 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
7390 (vsi->netdev)) {
7391 i40e_print_link_message(vsi, true);
7392 netif_tx_start_all_queues(vsi->netdev);
7393 netif_carrier_on(vsi->netdev);
7396 /* replay FDIR SB filters */
7397 if (vsi->type == I40E_VSI_FDIR) {
7398 /* reset fd counters */
7401 i40e_fdir_filter_restore(vsi);
7404 /* On the next run of the service_task, notify any clients of the new
7405 * opened netdev
7406 */
7407 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
7408 i40e_service_event_schedule(pf);
7414 * i40e_vsi_reinit_locked - Reset the VSI
7415 * @vsi: the VSI being configured
7417 * Rebuild the ring structs after some configuration
7418 * has changed, e.g. MTU size.
7420 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
7422 struct i40e_pf *pf = vsi->back;
7424 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
7425 usleep_range(1000, 2000);
7429 clear_bit(__I40E_CONFIG_BUSY, pf->state);
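/* Illustrative sketch (not part of the driver): __I40E_CONFIG_BUSY acts as
 * a sleeping bit lock; test_and_set_bit() returns the old value, so the
 * first caller to flip 0 -> 1 owns the configuration path and later callers
 * poll until it is released. A hypothetical acquire/release pair:
 */
static inline void example_config_lock(unsigned long *state, int bit)
{
	while (test_and_set_bit(bit, state))
		usleep_range(1000, 2000);	/* back off, then retry */
}

static inline void example_config_unlock(unsigned long *state, int bit)
{
	clear_bit(bit, state);
}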
7433 * i40e_force_link_state - Force the link status
7434 * @pf: board private structure
7435 * @is_up: whether the link state should be forced up or down
7437 static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
7439 struct i40e_aq_get_phy_abilities_resp abilities;
7440 struct i40e_aq_set_phy_config config = {0};
7441 bool non_zero_phy_type = is_up;
7442 struct i40e_hw *hw = &pf->hw;
7447 /* Card might've been put in an unstable state by other drivers
7448 * and applications, which causes incorrect speed values being
7449 * set on startup. In order to clear speed registers, we call
7450 * get_phy_capabilities twice, once to get initial state of
7451 * available speeds, and once to get current PHY config.
7453 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
7454 NULL);
7455 if (err) {
7456 dev_err(&pf->pdev->dev,
7457 "failed to get phy cap., ret = %pe last_status = %s\n",
7459 i40e_aq_str(hw, hw->aq.asq_last_status));
7462 speed = abilities.link_speed;
7464 /* Get the current phy config */
7465 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
7466 NULL);
7467 if (err) {
7468 dev_err(&pf->pdev->dev,
7469 "failed to get phy cap., ret = %pe last_status = %s\n",
7471 i40e_aq_str(hw, hw->aq.asq_last_status));
7475 /* If link needs to go up, but was not forced to go down,
7476 * and its speed values are OK, there is no need for a flap.
7477 * If non_zero_phy_type was set, we still need to force up.
7478 */
7479 if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags))
7480 non_zero_phy_type = true;
7481 else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
7482 return 0;
7484 /* To force link we need to set bits for all supported PHY types,
7485 * but there are now more than 32, so we need to split the bitmap
7486 * across two fields.
7488 mask = I40E_PHY_TYPES_BITMASK;
7489 config.phy_type =
7490 non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
7491 config.phy_type_ext =
7492 non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
7493 /* Copy the old settings, except for phy_type */
7494 config.abilities = abilities.abilities;
7495 if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) {
7496 if (is_up)
7497 config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
7498 else
7499 config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
7500 }
7501 if (abilities.link_speed != 0)
7502 config.link_speed = abilities.link_speed;
7503 else
7504 config.link_speed = speed;
7505 config.eee_capability = abilities.eee_capability;
7506 config.eeer = abilities.eeer_val;
7507 config.low_power_ctrl = abilities.d3_lpan;
7508 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
7509 I40E_AQ_PHY_FEC_CONFIG_MASK;
7510 err = i40e_aq_set_phy_config(hw, &config, NULL);
7512 if (err) {
7513 dev_err(&pf->pdev->dev,
7514 "set phy config ret = %pe last_status = %s\n",
7516 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7520 /* Update the link info */
7521 err = i40e_update_link_info(hw);
7522 if (err) {
7523 /* Wait a little bit (on 40G cards it sometimes takes a really
7524 * long time for link to come back from the atomic reset)
7525 * before transition.
7526 */
7527 msleep(1000);
7528 i40e_update_link_info(hw);
7531 i40e_aq_set_link_restart_an(hw, is_up, NULL);
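/* Illustrative sketch (not part of the driver): as the comment above notes,
 * the supported PHY type bitmap is wider than 32 bits, so it is split into
 * a 32-bit phy_type field plus an 8-bit phy_type_ext. A hypothetical helper
 * performing that split on a 64-bit mask:
 */
static inline void example_split_phy_mask(u64 mask, __le32 *lo32, u8 *ext8)
{
	*lo32 = cpu_to_le32((u32)(mask & 0xffffffff));	/* bits 0..31 */
	*ext8 = (u8)((mask >> 32) & 0xff);		/* bits 32..39 */
}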
7537 * i40e_up - Bring the connection back up after being down
7538 * @vsi: the VSI being configured
7540 int i40e_up(struct i40e_vsi *vsi)
7544 if (vsi->type == I40E_VSI_MAIN &&
7545 (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) ||
7546 test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags)))
7547 i40e_force_link_state(vsi->back, true);
7549 err = i40e_vsi_configure(vsi);
7550 if (!err)
7551 err = i40e_up_complete(vsi);
7557 * i40e_down - Shutdown the connection processing
7558 * @vsi: the VSI being stopped
7560 void i40e_down(struct i40e_vsi *vsi)
7564 /* It is assumed that the caller of this function
7565 * sets the vsi->state __I40E_VSI_DOWN bit.
7566 */
7567 if (vsi->netdev) {
7568 netif_carrier_off(vsi->netdev);
7569 netif_tx_disable(vsi->netdev);
7571 i40e_vsi_disable_irq(vsi);
7572 i40e_vsi_stop_rings(vsi);
7573 if (vsi->type == I40E_VSI_MAIN &&
7574 (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) ||
7575 test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags)))
7576 i40e_force_link_state(vsi->back, false);
7577 i40e_napi_disable_all(vsi);
7579 for (i = 0; i < vsi->num_queue_pairs; i++) {
7580 i40e_clean_tx_ring(vsi->tx_rings[i]);
7581 if (i40e_enabled_xdp_vsi(vsi)) {
7582 /* Make sure that in-progress ndo_xdp_xmit and
7583 * ndo_xsk_wakeup calls are completed.
7584 */
7585 synchronize_rcu();
7586 i40e_clean_tx_ring(vsi->xdp_rings[i]);
7588 i40e_clean_rx_ring(vsi->rx_rings[i]);
7594 * i40e_validate_mqprio_qopt- validate queue mapping info
7595 * @vsi: the VSI being configured
7596 * @mqprio_qopt: queue parameters
7598 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
7599 struct tc_mqprio_qopt_offload *mqprio_qopt)
7601 u64 sum_max_rate = 0;
7605 if (mqprio_qopt->qopt.offset[0] != 0 ||
7606 mqprio_qopt->qopt.num_tc < 1 ||
7607 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
7608 return -EINVAL;
7609 for (i = 0; ; i++) {
7610 if (!mqprio_qopt->qopt.count[i])
7611 return -EINVAL;
7612 if (mqprio_qopt->min_rate[i]) {
7613 dev_err(&vsi->back->pdev->dev,
7614 "Invalid min tx rate (greater than 0) specified\n");
7617 max_rate = mqprio_qopt->max_rate[i];
7618 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
7619 sum_max_rate += max_rate;
7621 if (i >= mqprio_qopt->qopt.num_tc - 1)
7622 break;
7623 if (mqprio_qopt->qopt.offset[i + 1] !=
7624 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7625 return -EINVAL;
7627 if (vsi->num_queue_pairs <
7628 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
7629 dev_err(&vsi->back->pdev->dev,
7630 "Failed to create traffic channel, insufficient number of queues.\n");
7633 if (sum_max_rate > i40e_get_link_speed(vsi)) {
7634 dev_err(&vsi->back->pdev->dev,
7635 "Invalid max tx rate specified\n");
7642 * i40e_vsi_set_default_tc_config - set default values for tc configuration
7643 * @vsi: the VSI being configured
7645 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
7650 /* Only TC0 is enabled */
7651 vsi->tc_config.numtc = 1;
7652 vsi->tc_config.enabled_tc = 1;
7653 qcount = min_t(int, vsi->alloc_queue_pairs,
7654 i40e_pf_get_max_q_per_tc(vsi->back));
7655 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
7656 /* For the TC that is not enabled set the offset to default
7657 * queue and allocate one queue for the given TC.
7658 */
7659 vsi->tc_config.tc_info[i].qoffset = 0;
7660 if (i == 0)
7661 vsi->tc_config.tc_info[i].qcount = qcount;
7662 else
7663 vsi->tc_config.tc_info[i].qcount = 1;
7664 vsi->tc_config.tc_info[i].netdev_tc = 0;
7669 * i40e_del_macvlan_filter
7670 * @hw: pointer to the HW structure
7671 * @seid: seid of the channel VSI
7672 * @macaddr: the mac address to apply as a filter
7673 * @aq_err: store the admin Q error
7675 * This function deletes a mac filter on the channel VSI which serves as the
7676 * macvlan. Returns 0 on success.
7678 static int i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
7679 const u8 *macaddr, int *aq_err)
7681 struct i40e_aqc_remove_macvlan_element_data element;
7684 memset(&element, 0, sizeof(element));
7685 ether_addr_copy(element.mac_addr, macaddr);
7686 element.vlan_tag = 0;
7687 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7688 status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
7689 *aq_err = hw->aq.asq_last_status;
7695 * i40e_add_macvlan_filter
7696 * @hw: pointer to the HW structure
7697 * @seid: seid of the channel VSI
7698 * @macaddr: the mac address to apply as a filter
7699 * @aq_err: store the admin Q error
7701 * This function adds a mac filter on the channel VSI which serves as the
7702 * macvlan. Returns 0 on success.
7704 static int i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
7705 const u8 *macaddr, int *aq_err)
7707 struct i40e_aqc_add_macvlan_element_data element;
7711 ether_addr_copy(element.mac_addr, macaddr);
7712 element.vlan_tag = 0;
7713 element.queue_number = 0;
7714 element.match_method = I40E_AQC_MM_ERR_NO_RES;
7715 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
7716 element.flags = cpu_to_le16(cmd_flags);
7717 status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
7718 *aq_err = hw->aq.asq_last_status;
7724 * i40e_reset_ch_rings - Reset the queue contexts in a channel
7725 * @vsi: the VSI we want to access
7726 * @ch: the channel we want to access
7728 static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
7730 struct i40e_ring *tx_ring, *rx_ring;
7734 for (i = 0; i < ch->num_queue_pairs; i++) {
7735 pf_q = ch->base_queue + i;
7736 tx_ring = vsi->tx_rings[pf_q];
7738 rx_ring = vsi->rx_rings[pf_q];
7744 * i40e_free_macvlan_channels
7745 * @vsi: the VSI we want to access
7747 * This function frees the Qs of the channel VSI from
7748 * the stack and also deletes the channel VSIs which
7749 * serve as macvlans.
7751 static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
7753 struct i40e_channel *ch, *ch_tmp;
7756 if (list_empty(&vsi->macvlan_list))
7757 return;
7759 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7760 struct i40e_vsi *parent_vsi;
7762 if (i40e_is_channel_macvlan(ch)) {
7763 i40e_reset_ch_rings(vsi, ch);
7764 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7765 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
7766 netdev_set_sb_channel(ch->fwd->netdev, 0);
7771 list_del(&ch->list);
7772 parent_vsi = ch->parent_vsi;
7773 if (!parent_vsi || !ch->initialized) {
7778 /* remove the VSI */
7779 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
7782 dev_err(&vsi->back->pdev->dev,
7783 "unable to remove channel (%d) for parent VSI(%d)\n",
7784 ch->seid, parent_vsi->seid);
7787 vsi->macvlan_cnt = 0;
7791 * i40e_fwd_ring_up - bring the macvlan device up
7792 * @vsi: the VSI we want to access
7793 * @vdev: macvlan netdevice
7794 * @fwd: the private fwd structure
7796 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
7797 struct i40e_fwd_adapter *fwd)
7799 struct i40e_channel *ch = NULL, *ch_tmp, *iter;
7800 int ret = 0, num_tc = 1, i, aq_err;
7801 struct i40e_pf *pf = vsi->back;
7802 struct i40e_hw *hw = &pf->hw;
7804 /* Go through the list and find an available channel */
7805 list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
7806 if (!i40e_is_channel_macvlan(iter)) {
7808 /* record configuration for macvlan interface in vdev */
7809 for (i = 0; i < num_tc; i++)
7810 netdev_bind_sb_channel_queue(vsi->netdev, vdev,
7811 i,
7812 iter->num_queue_pairs,
7813 iter->base_queue);
7814 for (i = 0; i < iter->num_queue_pairs; i++) {
7815 struct i40e_ring *tx_ring, *rx_ring;
7818 pf_q = iter->base_queue + i;
7820 /* Get to TX ring ptr */
7821 tx_ring = vsi->tx_rings[pf_q];
7824 /* Get the RX ring ptr */
7825 rx_ring = vsi->rx_rings[pf_q];
7836 /* Guarantee all rings are updated before we update the
7837 * MAC address filter.
7838 */
7839 wmb();
7841 /* Add a mac filter */
7842 ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
7843 if (ret) {
7844 /* if we cannot add the MAC rule then disable the offload */
7845 macvlan_release_l2fw_offload(vdev);
7846 for (i = 0; i < ch->num_queue_pairs; i++) {
7847 struct i40e_ring *rx_ring;
7850 pf_q = ch->base_queue + i;
7851 rx_ring = vsi->rx_rings[pf_q];
7852 rx_ring->netdev = NULL;
7854 dev_info(&pf->pdev->dev,
7855 "Error adding mac filter on macvlan err %pe, aq_err %s\n",
7857 i40e_aq_str(hw, aq_err));
7858 netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
7865 * i40e_setup_macvlans - create the channels which will be macvlans
7866 * @vsi: the VSI we want to access
7867 * @macvlan_cnt: no. of macvlans to be setup
7868 * @qcnt: no. of Qs per macvlan
7869 * @vdev: macvlan netdevice
7871 static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
7872 struct net_device *vdev)
7874 struct i40e_pf *pf = vsi->back;
7875 struct i40e_hw *hw = &pf->hw;
7876 struct i40e_vsi_context ctxt;
7877 u16 sections, qmap, num_qps;
7878 struct i40e_channel *ch;
7879 int i, pow, ret = 0;
7882 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
7883 return -EINVAL;
7885 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
7887 /* find the next higher power-of-2 of num queue pairs */
7888 pow = fls(roundup_pow_of_two(num_qps) - 1);
7890 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
7891 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
7893 /* Setup context bits for the main VSI */
7894 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
7895 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
7896 memset(&ctxt, 0, sizeof(ctxt));
7897 ctxt.seid = vsi->seid;
7898 ctxt.pf_num = vsi->back->hw.pf_id;
7900 ctxt.uplink_seid = vsi->uplink_seid;
7901 ctxt.info = vsi->info;
7902 ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
7903 ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
7904 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
7905 ctxt.info.valid_sections |= cpu_to_le16(sections);
7907 /* Reconfigure RSS for main VSI with new max queue count */
7908 vsi->rss_size = max_t(u16, num_qps, qcnt);
7909 ret = i40e_vsi_config_rss(vsi);
7911 dev_info(&pf->pdev->dev,
7912 "Failed to reconfig RSS for num_queues (%u)\n",
7916 vsi->reconfig_rss = true;
7917 dev_dbg(&vsi->back->pdev->dev,
7918 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
7919 vsi->next_base_queue = num_qps;
7920 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
7922 /* Update the VSI after updating the VSI queue-mapping
7923 * information
7924 */
7925 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7927 dev_info(&pf->pdev->dev,
7928 "Update vsi tc config failed, err %pe aq_err %s\n",
7930 i40e_aq_str(hw, hw->aq.asq_last_status));
7933 /* update the local VSI info with updated queue map */
7934 i40e_vsi_update_queue_map(vsi, &ctxt);
7935 vsi->info.valid_sections = 0;
7937 /* Create channels for macvlans */
7938 INIT_LIST_HEAD(&vsi->macvlan_list);
7939 for (i = 0; i < macvlan_cnt; i++) {
7940 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
7945 INIT_LIST_HEAD(&ch->list);
7946 ch->num_queue_pairs = qcnt;
7947 if (!i40e_setup_channel(pf, vsi, ch)) {
7952 ch->parent_vsi = vsi;
7953 vsi->cnt_q_avail -= ch->num_queue_pairs;
7955 list_add_tail(&ch->list, &vsi->macvlan_list);
7961 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
7962 i40e_free_macvlan_channels(vsi);
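/* Illustrative sketch (not part of the driver): i40e_setup_macvlans() keeps
 * num_qps = total - macvlan_cnt * qcnt queue pairs on the main VSI and
 * needs the exponent of the next power of two for the queue map;
 * fls(roundup_pow_of_two(n) - 1) computes it. A hypothetical helper:
 */
static inline int example_qp_order(u16 num_qps)
{
	/* e.g. num_qps = 6: roundup_pow_of_two(6) = 8, fls(8 - 1) = 3 */
	return fls(roundup_pow_of_two(num_qps) - 1);
}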
7968 * i40e_fwd_add - configure macvlans
7969 * @netdev: net device to configure
7970 * @vdev: macvlan netdevice
7972 static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
7974 struct i40e_netdev_priv *np = netdev_priv(netdev);
7975 u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
7976 struct i40e_vsi *vsi = np->vsi;
7977 struct i40e_pf *pf = vsi->back;
7978 struct i40e_fwd_adapter *fwd;
7979 int avail_macvlan, ret;
7981 if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
7982 netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
7983 return ERR_PTR(-EINVAL);
7985 if (i40e_is_tc_mqprio_enabled(pf)) {
7986 netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
7987 return ERR_PTR(-EINVAL);
7989 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
7990 netdev_info(netdev, "Not enough vectors available to support macvlans\n");
7991 return ERR_PTR(-EINVAL);
7994 /* The macvlan device has to be a single Q device so that the
7995 * tc_to_txq field can be reused to pick the tx queue.
7996 */
7997 if (netif_is_multiqueue(vdev))
7998 return ERR_PTR(-ERANGE);
8000 if (!vsi->macvlan_cnt) {
8001 /* reserve bit 0 for the pf device */
8002 set_bit(0, vsi->fwd_bitmask);
8004 /* Try to reserve as many queues as possible for macvlans. First
8005 * reserve 3/4th of max vectors, then half, then quarter and
8006 * calculate Qs per macvlan as you go.
8007 */
8008 vectors = pf->num_lan_msix;
8009 if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
8010 /* allocate 4 Qs per macvlan and 32 Qs to the PF */
8011 q_per_macvlan = 4;
8012 macvlan_cnt = (vectors - 32) / 4;
8013 } else if (vectors <= 64 && vectors > 32) {
8014 /* allocate 2 Qs per macvlan and 16 Qs to the PF */
8015 q_per_macvlan = 2;
8016 macvlan_cnt = (vectors - 16) / 2;
8017 } else if (vectors <= 32 && vectors > 16) {
8018 /* allocate 1 Q per macvlan and 16 Qs to the PF */
8019 q_per_macvlan = 1;
8020 macvlan_cnt = vectors - 16;
8021 } else if (vectors <= 16 && vectors > 8) {
8022 /* allocate 1 Q per macvlan and 8 Qs to the PF */
8023 q_per_macvlan = 1;
8024 macvlan_cnt = vectors - 8;
8025 } else {
8026 /* allocate 1 Q per macvlan and 1 Q to the PF */
8027 q_per_macvlan = 1;
8028 macvlan_cnt = vectors - 1;
8029 }
8031 if (macvlan_cnt == 0)
8032 return ERR_PTR(-EBUSY);
8034 /* Quiesce VSI queues */
8035 i40e_quiesce_vsi(vsi);
8037 /* sets up the macvlans but does not "enable" them */
8038 ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
8039 vdev);
8040 if (ret)
8041 return ERR_PTR(ret);
8044 i40e_unquiesce_vsi(vsi);
8046 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
8047 I40E_MAX_MACVLANS);
8048 if (avail_macvlan >= I40E_MAX_MACVLANS)
8049 return ERR_PTR(-EBUSY);
8051 /* create the fwd struct */
8052 fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
8053 if (!fwd)
8054 return ERR_PTR(-ENOMEM);
8056 set_bit(avail_macvlan, vsi->fwd_bitmask);
8057 fwd->bit_no = avail_macvlan;
8058 netdev_set_sb_channel(vdev, avail_macvlan);
8061 if (!netif_running(netdev))
8062 return fwd;
8064 /* Set fwd ring up */
8065 ret = i40e_fwd_ring_up(vsi, vdev, fwd);
8066 if (ret) {
8067 /* unbind the queues and drop the subordinate channel config */
8068 netdev_unbind_sb_channel(netdev, vdev);
8069 netdev_set_sb_channel(vdev, 0);
8072 return ERR_PTR(-EINVAL);
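/* Illustrative sketch (not part of the driver): the reservation ladder in
 * i40e_fwd_add() above trades PF queues for macvlan slots. With
 * vectors = 48 it takes the 33..64 branch: 2 queues per macvlan and
 * (48 - 16) / 2 = 16 macvlans, leaving 16 queues for the PF. A condensed,
 * hypothetical version of the ladder (the I40E_MAX_MACVLANS bound checked
 * by the real code is omitted here):
 */
static inline void example_macvlan_budget(u16 vectors, u16 *q_per_macvlan,
					  u16 *macvlan_cnt)
{
	if (vectors > 64) {
		*q_per_macvlan = 4;
		*macvlan_cnt = (vectors - 32) / 4;
	} else if (vectors > 32) {
		*q_per_macvlan = 2;
		*macvlan_cnt = (vectors - 16) / 2;
	} else if (vectors > 16) {
		*q_per_macvlan = 1;
		*macvlan_cnt = vectors - 16;
	} else if (vectors > 8) {
		*q_per_macvlan = 1;
		*macvlan_cnt = vectors - 8;
	} else {
		*q_per_macvlan = 1;
		*macvlan_cnt = vectors - 1;
	}
}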
8079 * i40e_del_all_macvlans - Delete all the mac filters on the channels
8080 * @vsi: the VSI we want to access
8082 static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
8084 struct i40e_channel *ch, *ch_tmp;
8085 struct i40e_pf *pf = vsi->back;
8086 struct i40e_hw *hw = &pf->hw;
8087 int aq_err, ret = 0;
8089 if (list_empty(&vsi->macvlan_list))
8090 return;
8092 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
8093 if (i40e_is_channel_macvlan(ch)) {
8094 ret = i40e_del_macvlan_filter(hw, ch->seid,
8095 i40e_channel_mac(ch),
8096 &aq_err);
8097 if (!ret) {
8098 /* Reset queue contexts */
8099 i40e_reset_ch_rings(vsi, ch);
8100 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
8101 netdev_unbind_sb_channel(vsi->netdev,
8103 netdev_set_sb_channel(ch->fwd->netdev, 0);
8112 * i40e_fwd_del - delete macvlan interfaces
8113 * @netdev: net device to configure
8114 * @vdev: macvlan netdevice
8116 static void i40e_fwd_del(struct net_device *netdev, void *vdev)
8118 struct i40e_netdev_priv *np = netdev_priv(netdev);
8119 struct i40e_fwd_adapter *fwd = vdev;
8120 struct i40e_channel *ch, *ch_tmp;
8121 struct i40e_vsi *vsi = np->vsi;
8122 struct i40e_pf *pf = vsi->back;
8123 struct i40e_hw *hw = &pf->hw;
8124 int aq_err, ret = 0;
8126 /* Find the channel associated with the macvlan and del mac filter */
8127 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
8128 if (i40e_is_channel_macvlan(ch) &&
8129 ether_addr_equal(i40e_channel_mac(ch),
8130 fwd->netdev->dev_addr)) {
8131 ret = i40e_del_macvlan_filter(hw, ch->seid,
8132 i40e_channel_mac(ch),
8133 &aq_err);
8134 if (!ret) {
8135 /* Reset queue contexts */
8136 i40e_reset_ch_rings(vsi, ch);
8137 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
8138 netdev_unbind_sb_channel(netdev, fwd->netdev);
8139 netdev_set_sb_channel(fwd->netdev, 0);
8143 dev_info(&pf->pdev->dev,
8144 "Error deleting mac filter on macvlan err %pe, aq_err %s\n",
8146 i40e_aq_str(hw, aq_err));
8154 * i40e_setup_tc - configure multiple traffic classes
8155 * @netdev: net device to configure
8156 * @type_data: tc offload data
8158 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
8160 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8161 struct i40e_netdev_priv *np = netdev_priv(netdev);
8162 struct i40e_vsi *vsi = np->vsi;
8163 struct i40e_pf *pf = vsi->back;
8164 u8 enabled_tc = 0, num_tc, hw;
8165 bool need_reset = false;
8166 int old_queue_pairs;
8171 old_queue_pairs = vsi->num_queue_pairs;
8172 num_tc = mqprio_qopt->qopt.num_tc;
8173 hw = mqprio_qopt->qopt.hw;
8174 mode = mqprio_qopt->mode;
8176 clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
8177 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8181 /* Check if MFP enabled */
8182 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
8183 netdev_info(netdev,
8184 "Configuring TC not supported in MFP mode\n");
8185 return ret;
8186 }
8188 case TC_MQPRIO_MODE_DCB:
8189 clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
8191 /* Check if DCB enabled to continue */
8192 if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
8193 netdev_info(netdev,
8194 "DCB is not enabled for adapter\n");
8195 return ret;
8196 }
8198 /* Check whether tc count is within enabled limit */
8199 if (num_tc > i40e_pf_get_num_tc(pf)) {
8200 netdev_info(netdev,
8201 "TC count greater than enabled on link for adapter\n");
8202 return ret;
8203 }
8204 break;
8205 case TC_MQPRIO_MODE_CHANNEL:
8206 if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
8207 netdev_info(netdev,
8208 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
8209 return ret;
8210 }
8211 if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
8213 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
8214 if (ret)
8215 return ret;
8216 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
8217 sizeof(*mqprio_qopt));
8218 set_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
8219 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
8226 /* Generate TC map for number of tc requested */
8227 for (i = 0; i < num_tc; i++)
8228 enabled_tc |= BIT(i);
8230 /* Requesting same TC configuration as already enabled */
8231 if (enabled_tc == vsi->tc_config.enabled_tc &&
8232 mode != TC_MQPRIO_MODE_CHANNEL)
8233 return 0;
8235 /* Quiesce VSI queues */
8236 i40e_quiesce_vsi(vsi);
8238 if (!hw && !i40e_is_tc_mqprio_enabled(pf))
8239 i40e_remove_queue_channels(vsi);
8241 /* Configure VSI for enabled TCs */
8242 ret = i40e_vsi_config_tc(vsi, enabled_tc);
8243 if (ret) {
8244 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
8245 vsi->seid);
8246 need_reset = true;
8247 goto exit;
8248 } else if (enabled_tc &&
8249 (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
8251 "Failed to create channel. Override queues (%u) not power of 2\n",
8252 vsi->tc_config.tc_info[0].qcount);
8258 dev_info(&vsi->back->pdev->dev,
8259 "Setup channel (id:%u) utilizing num_queues %d\n",
8260 vsi->seid, vsi->tc_config.tc_info[0].qcount);
8262 if (i40e_is_tc_mqprio_enabled(pf)) {
8263 if (vsi->mqprio_qopt.max_rate[0]) {
8264 u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
8265 vsi->mqprio_qopt.max_rate[0]);
8267 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
8268 if (!ret) {
8269 u64 credits = max_tx_rate;
8271 do_div(credits, I40E_BW_CREDIT_DIVISOR);
8272 dev_dbg(&vsi->back->pdev->dev,
8273 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
8282 ret = i40e_configure_queue_channels(vsi);
8283 if (ret) {
8284 vsi->num_queue_pairs = old_queue_pairs;
8285 netdev_info(netdev,
8286 "Failed configuring queue channels\n");
8293 /* Reset the configuration data to defaults, only TC0 is enabled */
8295 i40e_vsi_set_default_tc_config(vsi);
8300 i40e_unquiesce_vsi(vsi);
8305 * i40e_set_cld_element - sets cloud filter element data
8306 * @filter: cloud filter rule
8307 * @cld: ptr to cloud filter element data
8309 * This is a helper function to copy data into the cloud filter element.
8310 **/
8311 static void
8312 i40e_set_cld_element(struct i40e_cloud_filter *filter,
8313 struct i40e_aqc_cloud_filters_element_data *cld)
8318 memset(cld, 0, sizeof(*cld));
8319 ether_addr_copy(cld->outer_mac, filter->dst_mac);
8320 ether_addr_copy(cld->inner_mac, filter->src_mac);
8322 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
8323 return;
8325 if (filter->n_proto == ETH_P_IPV6) {
8326 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
8327 for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) {
8328 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
8330 *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa);
8331 }
8332 } else {
8333 ipa = be32_to_cpu(filter->dst_ipv4);
8335 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
8338 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
8340 /* tenant_id is not supported by FW now, once the support is enabled
8341 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
8342 */
8343 if (filter->tenant_id)
8344 return;
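/* Illustrative sketch (not part of the driver): the IPv6 branch above walks
 * the destination address from its last big-endian 32-bit word to the
 * first, converting each word to little endian for the firmware. A
 * hypothetical helper with the same word reversal:
 */
static inline void example_copy_ipv6_le(__le32 *dst, const __be32 *src)
{
	int i;

	for (i = 0; i < 4; i++)	/* an IPv6 address is 4 x 32-bit words */
		dst[i] = cpu_to_le32(be32_to_cpu(src[3 - i]));
}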
8348 * i40e_add_del_cloud_filter - Add/del cloud filter
8349 * @vsi: pointer to VSI
8350 * @filter: cloud filter rule
8351 * @add: if true, add, if false, delete
8353 * Add or delete a cloud filter for a specific flow spec.
8354 * Returns 0 if the filter was successfully added or deleted.
8356 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
8357 struct i40e_cloud_filter *filter, bool add)
8359 struct i40e_aqc_cloud_filters_element_data cld_filter;
8360 struct i40e_pf *pf = vsi->back;
8362 static const u16 flag_table[128] = {
8363 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
8364 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
8365 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
8366 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
8367 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
8368 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
8369 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
8370 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
8371 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
8372 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
8373 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
8374 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
8375 [I40E_CLOUD_FILTER_FLAGS_IIP] =
8376 I40E_AQC_ADD_CLOUD_FILTER_IIP,
8379 if (filter->flags >= ARRAY_SIZE(flag_table))
8382 memset(&cld_filter, 0, sizeof(cld_filter));
8384 /* copy element needed to add cloud filter from filter */
8385 i40e_set_cld_element(filter, &cld_filter);
8387 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
8388 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
8389 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
8391 if (filter->n_proto == ETH_P_IPV6)
8392 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8393 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8395 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8396 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8398 if (add)
8399 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
8400 &cld_filter, 1);
8401 else
8402 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
8403 &cld_filter, 1);
8404 if (ret)
8405 dev_dbg(&pf->pdev->dev,
8406 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
8407 add ? "add" : "delete", filter->dst_port, ret,
8408 pf->hw.aq.asq_last_status);
8409 else
8410 dev_info(&pf->pdev->dev,
8411 "%s cloud filter for VSI: %d\n",
8412 add ? "Added" : "Deleted", filter->seid);
8417 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
8418 * @vsi: pointer to VSI
8419 * @filter: cloud filter rule
8420 * @add: if true, add, if false, delete
8422 * Add or delete a cloud filter for a specific flow spec using big buffer.
8423 * Returns 0 if the filter was successfully added or deleted.
8425 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
8426 struct i40e_cloud_filter *filter,
8429 struct i40e_aqc_cloud_filters_element_bb cld_filter;
8430 struct i40e_pf *pf = vsi->back;
8433 /* Both (src/dst) valid mac_addr are not supported */
8434 if ((is_valid_ether_addr(filter->dst_mac) &&
8435 is_valid_ether_addr(filter->src_mac)) ||
8436 (is_multicast_ether_addr(filter->dst_mac) &&
8437 is_multicast_ether_addr(filter->src_mac)))
8438 return -EOPNOTSUPP;
8440 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
8441 * ports are not supported via big buffer now.
8442 */
8443 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
8444 return -EOPNOTSUPP;
8446 /* adding filter using src_port/src_ip is not supported at this stage */
8447 if (filter->src_port ||
8448 (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8449 !ipv6_addr_any(&filter->ip.v6.src_ip6))
8450 return -EOPNOTSUPP;
8452 memset(&cld_filter, 0, sizeof(cld_filter));
8454 /* copy element needed to add cloud filter from filter */
8455 i40e_set_cld_element(filter, &cld_filter.element);
8457 if (is_valid_ether_addr(filter->dst_mac) ||
8458 is_valid_ether_addr(filter->src_mac) ||
8459 is_multicast_ether_addr(filter->dst_mac) ||
8460 is_multicast_ether_addr(filter->src_mac)) {
8461 /* MAC + IP : unsupported mode */
8462 if (filter->dst_ipv4)
8463 return -EOPNOTSUPP;
8465 /* since we validated that L4 port must be valid before
8466 * we get here, start with respective "flags" value
8467 * and update if vlan is present or not
8468 */
8469 cld_filter.element.flags =
8470 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
8472 if (filter->vlan_id) {
8473 cld_filter.element.flags =
8474 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
8477 } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8478 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
8479 cld_filter.element.flags =
8480 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
8481 if (filter->n_proto == ETH_P_IPV6)
8482 cld_filter.element.flags |=
8483 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8485 cld_filter.element.flags |=
8486 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8488 dev_err(&pf->pdev->dev,
8489 "either mac or ip has to be valid for cloud filter\n");
8493 /* Now copy L4 port in Byte 6..7 in general fields */
8494 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
8495 be16_to_cpu(filter->dst_port);
8498 /* Validate current device switch mode, change if necessary */
8497 if (add) {
8498 /* Validate current device switch mode, change if necessary */
8499 ret = i40e_validate_and_set_switch_mode(vsi);
8500 if (ret) {
8501 dev_err(&pf->pdev->dev,
8502 "failed to set switch mode, ret %d\n",
8503 ret);
8504 return ret;
8505 }
8507 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
8508 &cld_filter, 1);
8509 } else {
8510 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
8511 &cld_filter, 1);
8512 }
8515 dev_dbg(&pf->pdev->dev,
8516 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
8517 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
8519 dev_info(&pf->pdev->dev,
8520 "%s cloud filter for VSI: %d, L4 port: %d\n",
8521 add ? "add" : "delete", filter->seid,
8522 ntohs(filter->dst_port));
8527 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
8528 * @vsi: Pointer to VSI
8529 * @f: Pointer to struct flow_cls_offload
8530 * @filter: Pointer to cloud filter structure
8533 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
8534 struct flow_cls_offload *f,
8535 struct i40e_cloud_filter *filter)
8537 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
8538 struct flow_dissector *dissector = rule->match.dissector;
8539 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
8540 struct i40e_pf *pf = vsi->back;
8541 u8 field_flags = 0;
8543 if (dissector->used_keys &
8544 ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
8545 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
8546 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
8547 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
8548 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
8549 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
8550 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
8551 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
8552 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%llx\n",
8553 dissector->used_keys);
8554 return -EOPNOTSUPP;
8555 }
8557 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
8558 struct flow_match_enc_keyid match;
8560 flow_rule_match_enc_keyid(rule, &match);
8561 if (match.mask->keyid != 0)
8562 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
8564 filter->tenant_id = be32_to_cpu(match.key->keyid);
8567 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
8568 struct flow_match_basic match;
8570 flow_rule_match_basic(rule, &match);
8571 n_proto_key = ntohs(match.key->n_proto);
8572 n_proto_mask = ntohs(match.mask->n_proto);
8574 if (n_proto_key == ETH_P_ALL) {
8575 n_proto_key = 0;
8576 n_proto_mask = 0;
8577 }
8578 filter->n_proto = n_proto_key & n_proto_mask;
8579 filter->ip_proto = match.key->ip_proto;
8582 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
8583 struct flow_match_eth_addrs match;
8585 flow_rule_match_eth_addrs(rule, &match);
8587 /* use is_broadcast and is_zero to check for all-0xff or all-0 masks */
8588 if (!is_zero_ether_addr(match.mask->dst)) {
8589 if (is_broadcast_ether_addr(match.mask->dst)) {
8590 field_flags |= I40E_CLOUD_FIELD_OMAC;
8591 } else {
8592 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
8593 match.mask->dst);
8594 return -EIO;
8595 }
8596 }
8598 if (!is_zero_ether_addr(match.mask->src)) {
8599 if (is_broadcast_ether_addr(match.mask->src)) {
8600 field_flags |= I40E_CLOUD_FIELD_IMAC;
8601 } else {
8602 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
8603 match.mask->src);
8604 return -EIO;
8605 }
8606 }
8607 ether_addr_copy(filter->dst_mac, match.key->dst);
8608 ether_addr_copy(filter->src_mac, match.key->src);
8611 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
8612 struct flow_match_vlan match;
8614 flow_rule_match_vlan(rule, &match);
8615 if (match.mask->vlan_id) {
8616 if (match.mask->vlan_id == VLAN_VID_MASK) {
8617 field_flags |= I40E_CLOUD_FIELD_IVLAN;
8619 } else {
8620 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
8621 match.mask->vlan_id);
8622 return -EIO;
8623 }
8624 }
8626 filter->vlan_id = cpu_to_be16(match.key->vlan_id);
8629 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
8630 struct flow_match_control match;
8632 flow_rule_match_control(rule, &match);
8633 addr_type = match.key->addr_type;
8636 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
8637 struct flow_match_ipv4_addrs match;
8639 flow_rule_match_ipv4_addrs(rule, &match);
8640 if (match.mask->dst) {
8641 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
8642 field_flags |= I40E_CLOUD_FIELD_IIP;
8643 } else {
8644 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
8645 &match.mask->dst);
8646 return -EIO;
8647 }
8648 }
8650 if (match.mask->src) {
8651 if (match.mask->src == cpu_to_be32(0xffffffff)) {
8652 field_flags |= I40E_CLOUD_FIELD_IIP;
8653 } else {
8654 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
8655 &match.mask->src);
8656 return -EIO;
8657 }
8658 }
8660 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
8661 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
8662 return -EIO;
8663 }
8664 filter->dst_ipv4 = match.key->dst;
8665 filter->src_ipv4 = match.key->src;
8668 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
8669 struct flow_match_ipv6_addrs match;
8671 flow_rule_match_ipv6_addrs(rule, &match);
8673 /* src and dest IPV6 address should not be LOOPBACK
8674 * (0:0:0:0:0:0:0:1), which can be represented as ::1
8676 if (ipv6_addr_loopback(&match.key->dst) ||
8677 ipv6_addr_loopback(&match.key->src)) {
8678 dev_err(&pf->pdev->dev,
8679 "Bad ipv6, addr is LOOPBACK\n");
8680 return -EIO;
8681 }
8682 if (!ipv6_addr_any(&match.mask->dst) ||
8683 !ipv6_addr_any(&match.mask->src))
8684 field_flags |= I40E_CLOUD_FIELD_IIP;
8686 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
8687 sizeof(filter->src_ipv6));
8688 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
8689 sizeof(filter->dst_ipv6));
8692 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
8693 struct flow_match_ports match;
8695 flow_rule_match_ports(rule, &match);
8696 if (match.mask->src) {
8697 if (match.mask->src == cpu_to_be16(0xffff)) {
8698 field_flags |= I40E_CLOUD_FIELD_IIP;
8699 } else {
8700 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
8701 be16_to_cpu(match.mask->src));
8702 return -EIO;
8703 }
8704 }
8706 if (match.mask->dst) {
8707 if (match.mask->dst == cpu_to_be16(0xffff)) {
8708 field_flags |= I40E_CLOUD_FIELD_IIP;
8709 } else {
8710 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
8711 be16_to_cpu(match.mask->dst));
8712 return -EIO;
8713 }
8714 }
8716 filter->dst_port = match.key->dst;
8717 filter->src_port = match.key->src;
8719 switch (filter->ip_proto) {
8720 case IPPROTO_TCP:
8721 case IPPROTO_UDP:
8722 break;
8723 default:
8724 dev_err(&pf->pdev->dev,
8725 "Only UDP and TCP transport are supported\n");
8726 return -EINVAL;
8727 }
8729 filter->flags = field_flags;
8730 return 0;
8731 }
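/* Example of a tc-flower rule this parser accepts (device name and
 * addresses are illustrative only):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       dst_mac 3c:fd:fe:a0:d6:70 skip_sw hw_tc 1
 *
 * Only the dissector keys accepted above (control, basic, eth addrs, vlan,
 * IPv4/IPv6 addrs, ports, enc_keyid) may appear; anything else fails the
 * used_keys check with -EOPNOTSUPP before a filter is built.
 */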
8734 * i40e_handle_tclass - Forward to a traffic class on the device
8735 * @vsi: Pointer to VSI
8736 * @tc: traffic class index on the device
8737 * @filter: Pointer to cloud filter structure
8740 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
8741 struct i40e_cloud_filter *filter)
8743 struct i40e_channel *ch, *ch_tmp;
8745 /* direct to a traffic class on the same device */
8746 if (tc == 0) {
8747 filter->seid = vsi->seid;
8748 return 0;
8749 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
8750 if (!filter->dst_port) {
8751 dev_err(&vsi->back->pdev->dev,
8752 "Specify a destination port to direct traffic to a class that is not the default\n");
8753 return -EINVAL;
8754 }
8755 if (list_empty(&vsi->ch_list))
8756 return -EINVAL;
8757 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
8758 list) {
8759 if (ch->seid == vsi->tc_seid_map[tc])
8760 filter->seid = ch->seid;
8761 }
8762 return 0;
8763 }
8764 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
8765 return -EINVAL;
8766 }
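/* The tc_seid_map consulted above is populated when ADq queue channels are
 * created, for example (device name and mapping illustrative):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *       queues 4@0 4@4 hw 1 mode channel
 *
 * so directing to tc 1 resolves to the SEID of the matching channel VSI.
 */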
8769 * i40e_configure_clsflower - Configure tc flower filters
8770 * @vsi: Pointer to VSI
8771 * @cls_flower: Pointer to struct flow_cls_offload
8774 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
8775 struct flow_cls_offload *cls_flower)
8777 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
8778 struct i40e_cloud_filter *filter = NULL;
8779 struct i40e_pf *pf = vsi->back;
8780 int err = 0;
8782 if (tc < 0) {
8783 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
8784 return -EOPNOTSUPP;
8785 }
8787 if (!tc) {
8788 dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination\n");
8789 return -EINVAL;
8790 }
8792 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
8793 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
8794 return -EBUSY;
8796 if (pf->fdir_pf_active_filters ||
8797 (!hlist_empty(&pf->fdir_filter_list))) {
8798 dev_err(&vsi->back->pdev->dev,
8799 "Flow Director Sideband filters exist, turn ntuple off to configure cloud filters\n");
8800 return -EINVAL;
8801 }
8803 if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags)) {
8804 dev_err(&vsi->back->pdev->dev,
8805 "Disabling Flow Director Sideband, cloud filters are being configured via tc-flower\n");
8806 clear_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags);
8807 clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, vsi->back->flags);
8810 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
8811 if (!filter)
8812 return -ENOMEM;
8814 filter->cookie = cls_flower->cookie;
8816 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
8817 if (err < 0)
8818 goto err;
8820 err = i40e_handle_tclass(vsi, tc, filter);
8821 if (err < 0)
8822 goto err;
8824 /* Add cloud filter; a destination L4 port selects the big-buffer AQ variant */
8825 if (filter->dst_port)
8826 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
8827 else
8828 err = i40e_add_del_cloud_filter(vsi, filter, true);
8830 if (err) {
8831 dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
8832 err);
8833 goto err;
8834 }
8836 /* add filter to the ordered list */
8837 INIT_HLIST_NODE(&filter->cloud_node);
8839 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
8841 pf->num_cloud_filters++;
8843 return err;
8844 err:
8845 kfree(filter);
8846 return err;
8847 }
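/* Note the flag handshake with the sideband path: when the first cloud
 * filter is added, FD_SB_ENA is cleared and FD_SB_TO_CLOUD_FILTER records
 * that sideband was turned off on the filter's behalf; deleting the last
 * cloud filter (below) reverses this and re-enables sideband.
 */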
8850 * i40e_find_cloud_filter - Find the cloud filter in the list
8851 * @vsi: Pointer to VSI
8852 * @cookie: filter specific cookie
8855 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
8856 unsigned long *cookie)
8858 struct i40e_cloud_filter *filter = NULL;
8859 struct hlist_node *node2;
8861 hlist_for_each_entry_safe(filter, node2,
8862 &vsi->back->cloud_filter_list, cloud_node)
8863 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
8864 return filter;
8865 return NULL;
8866 }
8869 * i40e_delete_clsflower - Remove tc flower filters
8870 * @vsi: Pointer to VSI
8871 * @cls_flower: Pointer to struct flow_cls_offload
8874 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
8875 struct flow_cls_offload *cls_flower)
8877 struct i40e_cloud_filter *filter = NULL;
8878 struct i40e_pf *pf = vsi->back;
8879 int err = 0;
8881 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
8882 if (!filter)
8883 return -EINVAL;
8886 hash_del(&filter->cloud_node);
8888 if (filter->dst_port)
8889 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
8891 err = i40e_add_del_cloud_filter(vsi, filter, false);
8893 kfree(filter);
8894 if (err) {
8895 dev_err(&pf->pdev->dev,
8896 "Failed to delete cloud filter, err %pe\n",
8897 ERR_PTR(err));
8898 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
8899 }
8901 pf->num_cloud_filters--;
8902 if (!pf->num_cloud_filters)
8903 if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) &&
8904 !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) {
8905 set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
8906 clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags);
8907 clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
8908 }
8910 return 0;
8911 }
8913 * i40e_setup_tc_cls_flower - flower classifier offloads
8914 * @np: net device to configure
8915 * @cls_flower: offload data
8917 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
8918 struct flow_cls_offload *cls_flower)
8920 struct i40e_vsi *vsi = np->vsi;
8922 switch (cls_flower->command) {
8923 case FLOW_CLS_REPLACE:
8924 return i40e_configure_clsflower(vsi, cls_flower);
8925 case FLOW_CLS_DESTROY:
8926 return i40e_delete_clsflower(vsi, cls_flower);
8927 case FLOW_CLS_STATS:
8928 return -EOPNOTSUPP;
8929 default:
8930 return -EOPNOTSUPP;
8931 }
8932 }
8934 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8935 void *cb_priv)
8936 {
8937 struct i40e_netdev_priv *np = cb_priv;
8939 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
8940 return -EOPNOTSUPP;
8942 switch (type) {
8943 case TC_SETUP_CLSFLOWER:
8944 return i40e_setup_tc_cls_flower(np, type_data);
8945 default:
8946 return -EOPNOTSUPP;
8947 }
8948 }
8951 static LIST_HEAD(i40e_block_cb_list);
8953 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8954 void *type_data)
8955 {
8956 struct i40e_netdev_priv *np = netdev_priv(netdev);
8958 switch (type) {
8959 case TC_SETUP_QDISC_MQPRIO:
8960 return i40e_setup_tc(netdev, type_data);
8961 case TC_SETUP_BLOCK:
8962 return flow_block_cb_setup_simple(type_data,
8963 &i40e_block_cb_list,
8964 i40e_setup_tc_block_cb,
8965 np, np, true);
8966 default:
8967 return -EOPNOTSUPP;
8968 }
8969 }
8972 * i40e_open - Called when a network interface is made active
8973 * @netdev: network interface device structure
8975 * The open entry point is called when a network interface is made
8976 * active by the system (IFF_UP). At this point all resources needed
8977 * for transmit and receive operations are allocated, the interrupt
8978 * handler is registered with the OS, the netdev watchdog subtask is
8979 * enabled, and the stack is notified that the interface is ready.
8981 * Returns 0 on success, negative value on failure
8983 int i40e_open(struct net_device *netdev)
8985 struct i40e_netdev_priv *np = netdev_priv(netdev);
8986 struct i40e_vsi *vsi = np->vsi;
8987 struct i40e_pf *pf = vsi->back;
8990 /* disallow open during test or if eeprom is broken */
8991 if (test_bit(__I40E_TESTING, pf->state) ||
8992 test_bit(__I40E_BAD_EEPROM, pf->state))
8993 return -EBUSY;
8995 netif_carrier_off(netdev);
8997 if (i40e_force_link_state(pf, true))
8998 return -EAGAIN;
9000 err = i40e_vsi_open(vsi);
9001 if (err)
9002 return err;
9004 /* configure global TSO hardware offload settings */
9005 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
9006 TCP_FLAG_FIN) >> 16);
9007 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
9008 TCP_FLAG_FIN |
9009 TCP_FLAG_CWR) >> 16);
9010 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
9011 udp_tunnel_get_rx_info(netdev);
9013 return 0;
9014 }
9017 * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
9018 * @vsi: vsi structure
9020 * This updates netdev's number of tx/rx queues
9022 * Returns status of setting tx/rx queues
9024 static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
9025 {
9026 int ret;
9028 ret = netif_set_real_num_rx_queues(vsi->netdev,
9029 vsi->num_queue_pairs);
9030 if (ret)
9031 return ret;
9033 return netif_set_real_num_tx_queues(vsi->netdev,
9034 vsi->num_queue_pairs);
9035 }
9038 * i40e_vsi_open - open a VSI and finish its initialization
9039 * @vsi: the VSI to open
9041 * Finish initialization of the VSI.
9043 * Returns 0 on success, negative value on failure
9045 * Note: expects to be called while under rtnl_lock()
9047 int i40e_vsi_open(struct i40e_vsi *vsi)
9049 struct i40e_pf *pf = vsi->back;
9050 char int_name[I40E_INT_NAME_STR_LEN];
9053 /* allocate descriptors */
9054 err = i40e_vsi_setup_tx_resources(vsi);
9055 if (err)
9056 goto err_setup_tx;
9057 err = i40e_vsi_setup_rx_resources(vsi);
9058 if (err)
9059 goto err_setup_rx;
9061 err = i40e_vsi_configure(vsi);
9062 if (err)
9063 goto err_setup_rx;
9065 if (vsi->netdev) {
9066 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
9067 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
9068 err = i40e_vsi_request_irq(vsi, int_name);
9069 if (err)
9070 goto err_setup_rx;
9072 /* Notify the stack of the actual queue counts. */
9073 err = i40e_netif_set_realnum_tx_rx_queues(vsi);
9074 if (err)
9075 goto err_set_queues;
9077 } else if (vsi->type == I40E_VSI_FDIR) {
9078 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
9079 dev_driver_string(&pf->pdev->dev),
9080 dev_name(&pf->pdev->dev));
9081 err = i40e_vsi_request_irq(vsi, int_name);
9082 if (err)
9083 goto err_setup_rx;
9085 } else {
9086 err = -EINVAL;
9087 goto err_setup_rx;
9088 }
9090 err = i40e_up_complete(vsi);
9091 if (err)
9092 goto err_up_complete;
9094 return 0;
9096 err_up_complete:
9097 i40e_down(vsi);
9098 err_set_queues:
9099 i40e_vsi_free_irq(vsi);
9100 err_setup_rx:
9101 i40e_vsi_free_rx_resources(vsi);
9102 err_setup_tx:
9103 i40e_vsi_free_tx_resources(vsi);
9104 if (vsi == pf->vsi[pf->lan_vsi])
9105 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
9107 return err;
9108 }
9111 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
9112 * @pf: Pointer to PF
9114 * This function destroys the hlist where all the Flow Director
9115 * filters were saved.
9117 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
9119 struct i40e_fdir_filter *filter;
9120 struct i40e_flex_pit *pit_entry, *tmp;
9121 struct hlist_node *node2;
9123 hlist_for_each_entry_safe(filter, node2,
9124 &pf->fdir_filter_list, fdir_node) {
9125 hlist_del(&filter->fdir_node);
9126 kfree(filter);
9127 }
9129 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
9130 list_del(&pit_entry->list);
9131 kfree(pit_entry);
9132 }
9133 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
9135 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
9136 list_del(&pit_entry->list);
9137 kfree(pit_entry);
9138 }
9139 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
9141 pf->fdir_pf_active_filters = 0;
9142 i40e_reset_fdir_filter_cnt(pf);
9144 /* Reprogram the default input set for TCP/IPv4 */
9145 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9146 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9147 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9149 /* Reprogram the default input set for TCP/IPv6 */
9150 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
9151 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9152 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9154 /* Reprogram the default input set for UDP/IPv4 */
9155 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
9156 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9157 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9159 /* Reprogram the default input set for UDP/IPv6 */
9160 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
9161 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9162 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9164 /* Reprogram the default input set for SCTP/IPv4 */
9165 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
9166 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9167 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9169 /* Reprogram the default input set for SCTP/IPv6 */
9170 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
9171 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9172 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9174 /* Reprogram the default input set for Other/IPv4 */
9175 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
9176 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9178 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
9179 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9181 /* Reprogram the default input set for Other/IPv6 */
9182 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
9183 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9185 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
9186 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
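/* The "input set" masks restored above define which packet fields a
 * sideband rule may match on. A default TCP/IPv4 rule programmed via
 * ethtool, for example (device name and values illustrative):
 *
 *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.0.10 dst-ip 192.168.0.20 \
 *       src-port 12000 dst-port 80 action 4
 *
 * uses exactly the L3 src/dst + L4 src/dst tuple reprogrammed here.
 */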
9190 * i40e_cloud_filter_exit - Cleans up the cloud filters
9191 * @pf: Pointer to PF
9193 * This function destroys the hlist in which all the cloud
9194 * filters were saved.
9196 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
9198 struct i40e_cloud_filter *cfilter;
9199 struct hlist_node *node;
9201 hlist_for_each_entry_safe(cfilter, node,
9202 &pf->cloud_filter_list, cloud_node) {
9203 hlist_del(&cfilter->cloud_node);
9204 kfree(cfilter);
9205 }
9206 pf->num_cloud_filters = 0;
9208 if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) &&
9209 !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) {
9210 set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
9211 clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags);
9212 clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
9217 * i40e_close - Disables a network interface
9218 * @netdev: network interface device structure
9220 * The close entry point is called when an interface is de-activated
9221 * by the OS. The hardware is still under the driver's control, but
9222 * this netdev interface is disabled.
9224 * Returns 0, this is not allowed to fail
9226 int i40e_close(struct net_device *netdev)
9228 struct i40e_netdev_priv *np = netdev_priv(netdev);
9229 struct i40e_vsi *vsi = np->vsi;
9231 i40e_vsi_close(vsi);
9233 return 0;
9234 }
9237 * i40e_do_reset - Start a PF or Core Reset sequence
9238 * @pf: board private structure
9239 * @reset_flags: which reset is requested
9240 * @lock_acquired: indicates whether or not the lock has been acquired
9241 * before this function was called.
9243 * The essential difference in resets is that the PF Reset
9244 * doesn't clear the packet buffers, doesn't reset the PE
9245 * firmware, and doesn't bother the other PFs on the chip.
9247 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
9251 /* do the biggest reset indicated */
9252 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
9254 /* Request a Global Reset
9256 * This will start the chip's countdown to the actual full
9257 * chip reset event, and a warning interrupt to be sent
9258 * to all PFs, including the requestor. Our handler
9259 * for the warning interrupt will deal with the shutdown
9260 * and recovery of the switch setup.
9262 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
9263 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9264 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
9265 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9267 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
9269 /* Request a Core Reset
9271 * Same as Global Reset, except does *not* include the MAC/PHY
9273 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
9274 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9275 val |= I40E_GLGEN_RTRIG_CORER_MASK;
9276 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9277 i40e_flush(&pf->hw);
9279 } else if (reset_flags & I40E_PF_RESET_FLAG) {
9281 /* Request a PF Reset
9283 * Resets only the PF-specific registers
9285 * This goes directly to the tear-down and rebuild of
9286 * the switch, since we need to do all the recovery as
9287 * for the Core Reset.
9289 dev_dbg(&pf->pdev->dev, "PFR requested\n");
9290 i40e_handle_reset_warning(pf, lock_acquired);
9292 } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
9293 /* Request a PF Reset
9295 * Resets PF and reinitializes PFs VSI.
9297 i40e_prep_for_reset(pf);
9298 i40e_reset_and_rebuild(pf, true, lock_acquired);
9299 dev_info(&pf->pdev->dev,
9300 test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ?
9301 "FW LLDP is disabled\n" :
9302 "FW LLDP is enabled\n");
9304 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
9307 /* Find the VSI(s) that requested a re-init */
9308 dev_info(&pf->pdev->dev,
9309 "VSI reinit requested\n");
9310 for (v = 0; v < pf->num_alloc_vsi; v++) {
9311 struct i40e_vsi *vsi = pf->vsi[v];
9313 if (vsi != NULL &&
9314 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
9315 vsi->state))
9316 i40e_vsi_reinit_locked(pf->vsi[v]);
9317 }
9318 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
9321 /* Find the VSI(s) that needs to be brought down */
9322 dev_info(&pf->pdev->dev, "VSI down requested\n");
9323 for (v = 0; v < pf->num_alloc_vsi; v++) {
9324 struct i40e_vsi *vsi = pf->vsi[v];
9326 if (vsi != NULL &&
9327 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
9328 vsi->state)) {
9329 set_bit(__I40E_VSI_DOWN, vsi->state);
9330 i40e_down(vsi);
9331 }
9332 }
9333 } else {
9334 dev_info(&pf->pdev->dev,
9335 "bad reset request 0x%08x\n", reset_flags);
9336 }
9337 }
9339 #ifdef CONFIG_I40E_DCB
9341 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
9342 * @pf: board private structure
9343 * @old_cfg: current DCB config
9344 * @new_cfg: new DCB config
9346 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
9347 struct i40e_dcbx_config *old_cfg,
9348 struct i40e_dcbx_config *new_cfg)
9350 bool need_reconfig = false;
9352 /* Check if ETS configuration has changed */
9353 if (memcmp(&new_cfg->etscfg,
9354 &old_cfg->etscfg,
9355 sizeof(new_cfg->etscfg))) {
9356 /* If Priority Table has changed reconfig is needed */
9357 if (memcmp(&new_cfg->etscfg.prioritytable,
9358 &old_cfg->etscfg.prioritytable,
9359 sizeof(new_cfg->etscfg.prioritytable))) {
9360 need_reconfig = true;
9361 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
9364 if (memcmp(&new_cfg->etscfg.tcbwtable,
9365 &old_cfg->etscfg.tcbwtable,
9366 sizeof(new_cfg->etscfg.tcbwtable)))
9367 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
9369 if (memcmp(&new_cfg->etscfg.tsatable,
9370 &old_cfg->etscfg.tsatable,
9371 sizeof(new_cfg->etscfg.tsatable)))
9372 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
9375 /* Check if PFC configuration has changed */
9376 if (memcmp(&new_cfg->pfc,
9377 &old_cfg->pfc,
9378 sizeof(new_cfg->pfc))) {
9379 need_reconfig = true;
9380 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
9383 /* Check if APP Table has changed */
9384 if (memcmp(&new_cfg->app,
9385 &old_cfg->app,
9386 sizeof(new_cfg->app))) {
9387 need_reconfig = true;
9388 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
9391 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
9392 return need_reconfig;
9396 * i40e_handle_lldp_event - Handle LLDP Change MIB event
9397 * @pf: board private structure
9398 * @e: event info posted on ARQ
9400 static int i40e_handle_lldp_event(struct i40e_pf *pf,
9401 struct i40e_arq_event_info *e)
9403 struct i40e_aqc_lldp_get_mib *mib =
9404 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
9405 struct i40e_hw *hw = &pf->hw;
9406 struct i40e_dcbx_config tmp_dcbx_cfg;
9407 bool need_reconfig = false;
9411 /* X710-T*L 2.5G and 5G speeds don't support DCB */
9412 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9413 (hw->phy.link_info.link_speed &
9414 ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
9415 !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
9416 /* let firmware decide if the DCB should be disabled */
9417 set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
9419 /* Not DCB capable or capability disabled */
9420 if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
9421 return ret;
9423 /* Ignore if event is not for Nearest Bridge */
9424 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
9425 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
9426 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
9427 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
9428 return ret;
9430 /* Check MIB Type and return if event for Remote MIB update */
9431 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
9432 dev_dbg(&pf->pdev->dev,
9433 "LLDP event mib type %s\n", type ? "remote" : "local");
9434 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
9435 /* Update the remote cached instance and return */
9436 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
9437 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
9438 &hw->remote_dcbx_config);
9439 goto exit;
9440 }
9442 /* Store the old configuration */
9443 tmp_dcbx_cfg = hw->local_dcbx_config;
9445 /* Reset the old DCBx configuration data */
9446 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
9447 /* Get updated DCBX data from firmware */
9448 ret = i40e_get_dcb_config(&pf->hw);
9449 if (ret) {
9450 /* X710-T*L 2.5G and 5G speeds don't support DCB */
9451 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9452 (hw->phy.link_info.link_speed &
9453 (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
9454 dev_warn(&pf->pdev->dev,
9455 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
9456 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
9458 dev_info(&pf->pdev->dev,
9459 "Failed querying DCB configuration data from firmware, err %pe aq_err %s\n",
9461 i40e_aq_str(&pf->hw,
9462 pf->hw.aq.asq_last_status));
9467 /* No change detected in DCBX configs */
9468 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
9469 sizeof(tmp_dcbx_cfg))) {
9470 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
9474 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
9475 &hw->local_dcbx_config);
9477 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
9482 /* Enable DCB tagging only when more than one TC */
9483 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
9484 set_bit(I40E_FLAG_DCB_ENA, pf->flags);
9486 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
9488 set_bit(__I40E_PORT_SUSPENDED, pf->state);
9489 /* Reconfiguration needed quiesce all VSIs */
9490 i40e_pf_quiesce_all_vsi(pf);
9492 /* Changes in configuration update VEB/VSI */
9493 i40e_dcb_reconfigure(pf);
9495 ret = i40e_resume_port_tx(pf);
9497 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
9498 /* In case of error no point in resuming VSIs */
9502 /* Wait for the PF's queues to be disabled */
9503 ret = i40e_pf_wait_queues_disabled(pf);
9504 if (ret) {
9505 /* Schedule PF reset to recover */
9506 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9507 i40e_service_event_schedule(pf);
9508 } else {
9509 i40e_pf_unquiesce_all_vsi(pf);
9510 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
9511 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
9512 }
9514 exit:
9515 return ret;
9516 }
9517 #endif /* CONFIG_I40E_DCB */
9520 * i40e_do_reset_safe - Protected reset path for userland calls.
9521 * @pf: board private structure
9522 * @reset_flags: which reset is requested
9525 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
9528 i40e_do_reset(pf, reset_flags, true);
9533 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
9534 * @pf: board private structure
9535 * @e: event info posted on ARQ
9537 * Handler for LAN Queue Overflow Event generated by the firmware for PF
9540 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
9541 struct i40e_arq_event_info *e)
9543 struct i40e_aqc_lan_overflow *data =
9544 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
9545 u32 queue = le32_to_cpu(data->prtdcb_rupto);
9546 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
9547 struct i40e_hw *hw = &pf->hw;
9551 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
9554 if (FIELD_GET(I40E_QTX_CTL_PFVF_Q_MASK, qtx_ctl) !=
9555 I40E_QTX_CTL_VF_QUEUE)
9556 return;
9558 /* Queue belongs to VF, find the VF and issue VF reset */
9559 vf_id = FIELD_GET(I40E_QTX_CTL_VFVM_INDX_MASK, qtx_ctl);
9560 vf_id -= hw->func_caps.vf_base_id;
9561 vf = &pf->vf[vf_id];
9562 i40e_vc_notify_vf_reset(vf);
9563 /* Allow VF to process pending reset notification */
9564 msleep(20);
9565 i40e_reset_vf(vf, false);
9566 }
9569 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
9570 * @pf: board private structure
9572 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
9576 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9577 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
9578 return fcnt_prog;
9579 }
9582 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
9583 * @pf: board private structure
9585 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
9589 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9590 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
9591 FIELD_GET(I40E_PFQF_FDSTAT_BEST_CNT_MASK, val);
9592 return fcnt_prog;
9593 }
9596 * i40e_get_global_fd_count - Get total FD filters programmed on device
9597 * @pf: board private structure
9599 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
9603 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
9604 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
9605 FIELD_GET(I40E_GLQF_FDCNT_0_BESTCNT_MASK, val);
9606 return fcnt_prog;
9607 }
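/* All three counters above read the same split: the device reserves a
 * per-PF "guaranteed" pool of Flow Director entries plus a shared
 * "best effort" pool; GUARANT_CNT and BEST_CNT report how many of each
 * are currently programmed.
 */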
9610 * i40e_reenable_fdir_sb - Restore FDir SB capability
9611 * @pf: board private structure
9613 static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
9615 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
9616 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
9617 (I40E_DEBUG_FD & pf->hw.debug_mask))
9618 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
9622 * i40e_reenable_fdir_atr - Restore FDir ATR capability
9623 * @pf: board private structure
9625 static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
9627 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
9628 /* ATR uses the same filtering logic as SB rules. It only
9629 * functions properly if the input set mask is at the default
9630 * settings. It is safe to restore the default input set
9631 * because there are no active TCPv4 filter rules.
9633 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9634 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9635 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9637 if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
9638 (I40E_DEBUG_FD & pf->hw.debug_mask))
9639 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
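/* Sideband (and with it ATR's auto-disable dance) is ultimately gated by
 * the ntuple feature flag, toggled from userspace with, for example:
 *
 *   ethtool -K eth0 ntuple on
 */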
9644 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
9645 * @pf: board private structure
9646 * @filter: FDir filter to remove
9648 static void i40e_delete_invalid_filter(struct i40e_pf *pf,
9649 struct i40e_fdir_filter *filter)
9651 /* Update counters */
9652 pf->fdir_pf_active_filters--;
9655 switch (filter->flow_type) {
9656 case TCP_V4_FLOW:
9657 pf->fd_tcp4_filter_cnt--;
9658 break;
9659 case UDP_V4_FLOW:
9660 pf->fd_udp4_filter_cnt--;
9661 break;
9662 case SCTP_V4_FLOW:
9663 pf->fd_sctp4_filter_cnt--;
9664 break;
9665 case TCP_V6_FLOW:
9666 pf->fd_tcp6_filter_cnt--;
9667 break;
9668 case UDP_V6_FLOW:
9669 pf->fd_udp6_filter_cnt--;
9670 break;
9671 case SCTP_V6_FLOW:
9672 pf->fd_sctp6_filter_cnt--;
9673 break;
9674 case IP_USER_FLOW:
9675 switch (filter->ipl4_proto) {
9676 case IPPROTO_TCP:
9677 pf->fd_tcp4_filter_cnt--;
9678 break;
9679 case IPPROTO_UDP:
9680 pf->fd_udp4_filter_cnt--;
9681 break;
9682 case IPPROTO_SCTP:
9683 pf->fd_sctp4_filter_cnt--;
9684 break;
9685 case IPPROTO_IP:
9686 pf->fd_ip4_filter_cnt--;
9687 break;
9688 }
9689 break;
9690 case IPV6_USER_FLOW:
9691 switch (filter->ipl4_proto) {
9692 case IPPROTO_TCP:
9693 pf->fd_tcp6_filter_cnt--;
9694 break;
9695 case IPPROTO_UDP:
9696 pf->fd_udp6_filter_cnt--;
9697 break;
9698 case IPPROTO_SCTP:
9699 pf->fd_sctp6_filter_cnt--;
9700 break;
9701 case IPPROTO_IP:
9702 pf->fd_ip6_filter_cnt--;
9703 break;
9704 }
9705 break;
9706 }
9708 /* Remove the filter from the list and free memory */
9709 hlist_del(&filter->fdir_node);
9710 kfree(filter);
9711 }
9714 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
9715 * @pf: board private structure
9717 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
9719 struct i40e_fdir_filter *filter;
9720 u32 fcnt_prog, fcnt_avail;
9721 struct hlist_node *node;
9723 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9724 return;
9726 /* Check if we have enough room to re-enable FDir SB capability. */
9727 fcnt_prog = i40e_get_global_fd_count(pf);
9728 fcnt_avail = pf->fdir_pf_filter_count;
9729 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
9730 (pf->fd_add_err == 0) ||
9731 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
9732 i40e_reenable_fdir_sb(pf);
9734 /* We should wait for even more space before re-enabling ATR.
9735 * Additionally, we cannot enable ATR as long as we still have TCP SB
9736 * rules.
9737 */
9738 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
9739 pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0)
9740 i40e_reenable_fdir_atr(pf);
9742 /* if hw had a problem adding a filter, delete it */
9743 if (pf->fd_inv > 0) {
9744 hlist_for_each_entry_safe(filter, node,
9745 &pf->fdir_filter_list, fdir_node)
9746 if (filter->fd_id == pf->fd_inv)
9747 i40e_delete_invalid_filter(pf, filter);
9751 #define I40E_MIN_FD_FLUSH_INTERVAL 10
9752 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
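/* Both intervals above are in seconds: a full table flush is attempted at
 * most once per I40E_MIN_FD_FLUSH_INTERVAL, and if flushes recur while the
 * table is dominated by sideband rules, ATR is held off for the longer
 * SB_ATR_UNSTABLE window (see min_flush_time below).
 */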
9754 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
9755 * @pf: board private structure
9757 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
9759 unsigned long min_flush_time;
9760 int flush_wait_retry = 50;
9761 bool disable_atr = false;
9765 if (!time_after(jiffies, pf->fd_flush_timestamp +
9766 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
9767 return;
9769 /* If flushes happen too quickly and we mostly have SB rules, we
9770 * should not re-enable ATR for some time.
9771 */
9772 min_flush_time = pf->fd_flush_timestamp +
9773 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
9774 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
9776 if (!(time_after(jiffies, min_flush_time)) &&
9777 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
9778 if (I40E_DEBUG_FD & pf->hw.debug_mask)
9779 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
9780 disable_atr = true;
9781 }
9783 pf->fd_flush_timestamp = jiffies;
9784 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9785 /* flush all filters */
9786 wr32(&pf->hw, I40E_PFQF_CTL_1,
9787 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
9788 i40e_flush(&pf->hw);
9789 pf->fd_flush_cnt++;
9790 pf->fd_add_err = 0;
9791 do {
9792 /* Check FD flush status every 5-6msec */
9793 usleep_range(5000, 6000);
9794 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
9795 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
9797 } while (flush_wait_retry--);
9798 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
9799 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
9800 } else {
9801 /* replay sideband filters */
9802 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
9803 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
9804 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9805 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
9806 if (I40E_DEBUG_FD & pf->hw.debug_mask)
9807 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
9812 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
9813 * @pf: board private structure
9815 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
9817 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
9821 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
9822 * @pf: board private structure
9824 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
9827 /* if interface is down do nothing */
9828 if (test_bit(__I40E_DOWN, pf->state))
9831 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9832 i40e_fdir_flush_and_replay(pf);
9834 i40e_fdir_check_and_reenable(pf);
9839 * i40e_vsi_link_event - notify VSI of a link event
9840 * @vsi: vsi to be notified
9841 * @link_up: link up or down
9843 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
9845 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
9846 return;
9848 switch (vsi->type) {
9849 case I40E_VSI_MAIN:
9850 if (!vsi->netdev || !vsi->netdev_registered)
9851 break;
9853 if (link_up) {
9854 netif_carrier_on(vsi->netdev);
9855 netif_tx_wake_all_queues(vsi->netdev);
9856 } else {
9857 netif_carrier_off(vsi->netdev);
9858 netif_tx_stop_all_queues(vsi->netdev);
9859 }
9860 break;
9862 case I40E_VSI_SRIOV:
9863 case I40E_VSI_VMDQ2:
9864 case I40E_VSI_CTRL:
9865 case I40E_VSI_IWARP:
9866 case I40E_VSI_MIRROR:
9867 default:
9868 /* there is no notification for other VSIs */
9869 break;
9870 }
9871 }
9874 * i40e_veb_link_event - notify elements on the veb of a link event
9875 * @veb: veb to be notified
9876 * @link_up: link up or down
9878 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
9883 if (!veb || !veb->pf)
9884 return;
9885 pf = veb->pf;
9887 /* depth first... */
9888 for (i = 0; i < I40E_MAX_VEB; i++)
9889 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
9890 i40e_veb_link_event(pf->veb[i], link_up);
9892 /* ... now the local VSIs */
9893 for (i = 0; i < pf->num_alloc_vsi; i++)
9894 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
9895 i40e_vsi_link_event(pf->vsi[i], link_up);
9899 * i40e_link_event - Update netif_carrier status
9900 * @pf: board private structure
9902 static void i40e_link_event(struct i40e_pf *pf)
9904 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9905 u8 new_link_speed, old_link_speed;
9906 bool new_link, old_link;
9908 #ifdef CONFIG_I40E_DCB
9910 #endif /* CONFIG_I40E_DCB */
9912 /* set this to force the get_link_status call to refresh state */
9913 pf->hw.phy.get_link_info = true;
9914 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
9915 status = i40e_get_link_status(&pf->hw, &new_link);
9917 /* On success, disable temp link polling */
9918 if (status == 0) {
9919 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9920 } else {
9921 /* Enable link polling temporarily until i40e_get_link_status
9922 * returns 0
9923 */
9924 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9925 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
9926 status);
9927 return;
9928 }
9930 old_link_speed = pf->hw.phy.link_info_old.link_speed;
9931 new_link_speed = pf->hw.phy.link_info.link_speed;
9933 if (new_link == old_link &&
9934 new_link_speed == old_link_speed &&
9935 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
9936 new_link == netif_carrier_ok(vsi->netdev)))
9939 i40e_print_link_message(vsi, new_link);
9941 /* Notify the base of the switch tree connected to
9942 * the link. Floating VEBs are not notified.
9944 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
9945 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
9947 i40e_vsi_link_event(vsi, new_link);
9950 i40e_vc_notify_link_state(pf);
9952 if (test_bit(I40E_FLAG_PTP_ENA, pf->flags))
9953 i40e_ptp_set_increment(pf);
9954 #ifdef CONFIG_I40E_DCB
9955 if (new_link == old_link)
9957 /* Not SW DCB so firmware will take care of default settings */
9958 if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
9961 /* We cover here only link down, as after link up in case of SW DCB
9962 * SW LLDP agent will take care of setting it up
9965 dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n");
9966 memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
9967 err = i40e_dcb_sw_default_config(pf);
9969 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
9970 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
9972 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
9973 DCB_CAP_DCBX_VER_IEEE;
9974 set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
9975 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
9978 #endif /* CONFIG_I40E_DCB */
9982 * i40e_watchdog_subtask - periodic checks not using event driven response
9983 * @pf: board private structure
9985 static void i40e_watchdog_subtask(struct i40e_pf *pf)
9989 /* if interface is down do nothing */
9990 if (test_bit(__I40E_DOWN, pf->state) ||
9991 test_bit(__I40E_CONFIG_BUSY, pf->state))
9992 return;
9994 /* make sure we don't do these things too often */
9995 if (time_before(jiffies, (pf->service_timer_previous +
9996 pf->service_timer_period)))
9997 return;
9998 pf->service_timer_previous = jiffies;
10000 if (test_bit(I40E_FLAG_LINK_POLLING_ENA, pf->flags) ||
10001 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
10002 i40e_link_event(pf);
10004 /* Update the stats for active netdevs so the network stack
10005 * can look at updated numbers whenever it cares to
10007 for (i = 0; i < pf->num_alloc_vsi; i++)
10008 if (pf->vsi[i] && pf->vsi[i]->netdev)
10009 i40e_update_stats(pf->vsi[i]);
10011 if (test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)) {
10012 /* Update the stats for the active switching components */
10013 for (i = 0; i < I40E_MAX_VEB; i++)
10014 if (pf->veb[i])
10015 i40e_update_veb_stats(pf->veb[i]);
10018 i40e_ptp_rx_hang(pf);
10019 i40e_ptp_tx_hang(pf);
10023 * i40e_reset_subtask - Set up for resetting the device and driver
10024 * @pf: board private structure
10026 static void i40e_reset_subtask(struct i40e_pf *pf)
10028 u32 reset_flags = 0;
10030 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
10031 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
10032 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
10034 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
10035 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
10036 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
10038 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
10039 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
10040 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
10042 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
10043 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
10044 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
10046 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
10047 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
10048 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
10051 /* If there's a recovery already waiting, it takes
10052 * precedence before starting a new reset sequence.
10054 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
10055 i40e_prep_for_reset(pf);
10056 i40e_reset(pf);
10057 i40e_rebuild(pf, false, false);
10058 }
10060 /* If we're already down or resetting, just bail */
10061 if (reset_flags &&
10062 !test_bit(__I40E_DOWN, pf->state) &&
10063 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
10064 i40e_do_reset(pf, reset_flags, false);
10065 }
10066 }
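/* An interrupt-driven reset (RESET_INTR_RECEIVED) thus takes precedence:
 * it is serviced inline with prep/reset/rebuild above, and only otherwise
 * are the accumulated request bits turned into a single i40e_do_reset()
 * call for the largest reset requested.
 */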
10069 * i40e_handle_link_event - Handle link event
10070 * @pf: board private structure
10071 * @e: event info posted on ARQ
10073 static void i40e_handle_link_event(struct i40e_pf *pf,
10074 struct i40e_arq_event_info *e)
10076 struct i40e_aqc_get_link_status *status =
10077 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
10079 /* Do a new status request to re-enable LSE reporting
10080 * and load new status information into the hw struct
10081 * This completely ignores any state information
10082 * in the ARQ event info, instead choosing to always
10083 * issue the AQ update link status command.
10085 i40e_link_event(pf);
10087 /* Check if module meets thermal requirements */
10088 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
10089 dev_err(&pf->pdev->dev,
10090 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
10091 dev_err(&pf->pdev->dev,
10092 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
10094 /* check for unqualified module, if link is down, suppress
10095 * the message if link was forced to be down.
10097 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
10098 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
10099 (!(status->link_info & I40E_AQ_LINK_UP)) &&
10100 (!test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))) {
10101 dev_err(&pf->pdev->dev,
10102 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
10103 dev_err(&pf->pdev->dev,
10104 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
10110 * i40e_clean_adminq_subtask - Clean the AdminQ rings
10111 * @pf: board private structure
10113 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
10115 struct i40e_arq_event_info event;
10116 struct i40e_hw *hw = &pf->hw;
10117 u16 pending, i = 0;
10123 /* Do not run clean AQ when PF reset fails */
10124 if (test_bit(__I40E_RESET_FAILED, pf->state))
10125 return;
10127 /* check for error indications */
10128 val = rd32(&pf->hw, I40E_PF_ARQLEN);
10130 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
10131 if (hw->debug_mask & I40E_DEBUG_AQ)
10132 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
10133 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
10135 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
10136 if (hw->debug_mask & I40E_DEBUG_AQ)
10137 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
10138 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
10139 pf->arq_overflows++;
10141 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
10142 if (hw->debug_mask & I40E_DEBUG_AQ)
10143 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
10144 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
10147 wr32(&pf->hw, I40E_PF_ARQLEN, val);
10149 val = rd32(&pf->hw, I40E_PF_ATQLEN);
10151 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
10152 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10153 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
10154 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
10156 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
10157 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10158 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
10159 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
10161 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
10162 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10163 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
10164 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
10167 wr32(&pf->hw, I40E_PF_ATQLEN, val);
10169 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
10170 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
10171 if (!event.msg_buf)
10172 return;
10175 ret = i40e_clean_arq_element(hw, &event, &pending);
10176 if (ret == -EALREADY)
10177 break;
10178 else if (ret) {
10179 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
10180 break;
10181 }
10183 opcode = le16_to_cpu(event.desc.opcode);
10186 case i40e_aqc_opc_get_link_status:
10188 i40e_handle_link_event(pf, &event);
10191 case i40e_aqc_opc_send_msg_to_pf:
10192 ret = i40e_vc_process_vf_msg(pf,
10193 le16_to_cpu(event.desc.retval),
10194 le32_to_cpu(event.desc.cookie_high),
10195 le32_to_cpu(event.desc.cookie_low),
10199 case i40e_aqc_opc_lldp_update_mib:
10200 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
10201 #ifdef CONFIG_I40E_DCB
10203 i40e_handle_lldp_event(pf, &event);
10205 #endif /* CONFIG_I40E_DCB */
10207 case i40e_aqc_opc_event_lan_overflow:
10208 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
10209 i40e_handle_lan_overflow_event(pf, &event);
10211 case i40e_aqc_opc_send_msg_to_peer:
10212 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
10214 case i40e_aqc_opc_nvm_erase:
10215 case i40e_aqc_opc_nvm_update:
10216 case i40e_aqc_opc_oem_post_update:
10217 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
10218 "ARQ NVM operation 0x%04x completed\n",
10222 dev_info(&pf->pdev->dev,
10223 "ARQ: Unknown event 0x%04x ignored\n",
10227 } while (i++ < I40E_AQ_WORK_LIMIT);
10229 if (i < I40E_AQ_WORK_LIMIT)
10230 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
10232 /* re-enable Admin queue interrupt cause */
10233 val = rd32(hw, I40E_PFINT_ICR0_ENA);
10234 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
10235 wr32(hw, I40E_PFINT_ICR0_ENA, val);
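/* The ADMINQ cause is masked in ICR0_ENA while events are serviced; the
 * write above re-arms it so further admin queue events can raise the
 * interrupt again.
 */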
10238 kfree(event.msg_buf);
10242 * i40e_verify_eeprom - make sure eeprom is good to use
10243 * @pf: board private structure
10245 static void i40e_verify_eeprom(struct i40e_pf *pf)
10249 err = i40e_diag_eeprom_test(&pf->hw);
10250 if (err) {
10251 /* retry in case of garbage read */
10252 err = i40e_diag_eeprom_test(&pf->hw);
10253 if (err) {
10254 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
10255 err);
10256 set_bit(__I40E_BAD_EEPROM, pf->state);
10257 }
10258 }
10260 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
10261 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
10262 clear_bit(__I40E_BAD_EEPROM, pf->state);
10267 * i40e_enable_pf_switch_lb
10268 * @pf: pointer to the PF structure
10270 * enable switch loop back or die - no point in a return value
10272 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
10274 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10275 struct i40e_vsi_context ctxt;
10278 ctxt.seid = pf->main_vsi_seid;
10279 ctxt.pf_num = pf->hw.pf_id;
10281 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10283 dev_info(&pf->pdev->dev,
10284 "couldn't get PF vsi config, err %pe aq_err %s\n",
10286 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10287 return;
10288 }
10289 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10290 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10291 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10293 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10295 dev_info(&pf->pdev->dev,
10296 "update vsi switch failed, err %pe aq_err %s\n",
10298 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10303 * i40e_disable_pf_switch_lb
10304 * @pf: pointer to the PF structure
10306 * disable switch loop back or die - no point in a return value
10308 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
10310 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10311 struct i40e_vsi_context ctxt;
10314 ctxt.seid = pf->main_vsi_seid;
10315 ctxt.pf_num = pf->hw.pf_id;
10317 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10319 dev_info(&pf->pdev->dev,
10320 "couldn't get PF vsi config, err %pe aq_err %s\n",
10322 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10323 return;
10324 }
10325 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10326 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10327 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10329 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10331 dev_info(&pf->pdev->dev,
10332 "update vsi switch failed, err %pe aq_err %s\n",
10334 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10339 * i40e_config_bridge_mode - Configure the HW bridge mode
10340 * @veb: pointer to the bridge instance
10342 * Configure the loop back mode for the LAN VSI that is downlink to the
10343 * specified HW bridge instance. It is expected this function is called
10344 * when a new HW bridge is instantiated.
10346 static void i40e_config_bridge_mode(struct i40e_veb *veb)
10348 struct i40e_pf *pf = veb->pf;
10350 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
10351 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
10352 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
10353 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
10354 i40e_disable_pf_switch_lb(pf);
10356 i40e_enable_pf_switch_lb(pf);
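/* The bridge mode itself is normally chosen from userspace, e.g. with
 * iproute2 (device name illustrative):
 *
 *   bridge link set dev eth0 hwmode vepa
 *
 * VEPA turns the PF's switch loopback off (traffic hairpins through the
 * adjacent bridge), while VEB turns it back on for local VM-to-VM
 * switching.
 */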
10360 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
10361 * @veb: pointer to the VEB instance
10363 * This is a recursive function that first builds the attached VSIs then
10364 * recurses in to build the next layer of VEB. We track the connections
10365 * through our own index numbers because the seid's from the HW could
10366 * change across the reset.
10368 static int i40e_reconstitute_veb(struct i40e_veb *veb)
10370 struct i40e_vsi *ctl_vsi = NULL;
10371 struct i40e_pf *pf = veb->pf;
10375 /* build VSI that owns this VEB, temporarily attached to base VEB */
10376 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
10377 if (pf->vsi[v] &&
10378 pf->vsi[v]->veb_idx == veb->idx &&
10379 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
10380 ctl_vsi = pf->vsi[v];
10384 if (!ctl_vsi) {
10385 dev_info(&pf->pdev->dev,
10386 "missing owner VSI for veb_idx %d\n", veb->idx);
10387 ret = -ENOENT;
10388 goto end_reconstitute;
10389 }
10390 if (ctl_vsi != pf->vsi[pf->lan_vsi])
10391 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10392 ret = i40e_add_vsi(ctl_vsi);
10394 dev_info(&pf->pdev->dev,
10395 "rebuild of veb_idx %d owner VSI failed: %d\n",
10397 goto end_reconstitute;
10399 i40e_vsi_reset_stats(ctl_vsi);
10401 /* create the VEB in the switch and move the VSI onto the VEB */
10402 ret = i40e_add_veb(veb, ctl_vsi);
10404 goto end_reconstitute;
10406 if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
10407 veb->bridge_mode = BRIDGE_MODE_VEB;
10409 veb->bridge_mode = BRIDGE_MODE_VEPA;
10410 i40e_config_bridge_mode(veb);
10412 /* create the remaining VSIs attached to this VEB */
10413 for (v = 0; v < pf->num_alloc_vsi; v++) {
10414 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
10415 continue;
10417 if (pf->vsi[v]->veb_idx == veb->idx) {
10418 struct i40e_vsi *vsi = pf->vsi[v];
10420 vsi->uplink_seid = veb->seid;
10421 ret = i40e_add_vsi(vsi);
10423 dev_info(&pf->pdev->dev,
10424 "rebuild of vsi_idx %d failed: %d\n",
10426 goto end_reconstitute;
10428 i40e_vsi_reset_stats(vsi);
10432 /* create any VEBs attached to this VEB - RECURSION */
10433 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10434 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
10435 pf->veb[veb_idx]->uplink_seid = veb->seid;
10436 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
10437 if (ret)
10438 break;
10439 }
10440 }
10442 end_reconstitute:
10443 return ret;
10444 }
10447 * i40e_get_capabilities - get info about the HW
10448 * @pf: the PF struct
10449 * @list_type: AQ capability to be queried
10451 static int i40e_get_capabilities(struct i40e_pf *pf,
10452 enum i40e_admin_queue_opc list_type)
10454 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
10459 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
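/* Discovery is a negotiation: if the 40-element guess above is too small,
 * the AQ fails with I40E_AQ_RC_ENOMEM and reports the required size in
 * data_size, and the loop below retries with that larger buffer.
 */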
10460 do {
10461 cap_buf = kzalloc(buf_len, GFP_KERNEL);
10462 if (!cap_buf)
10463 return -ENOMEM;
10465 /* this loads the data into the hw struct for us */
10466 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
10467 &data_size, list_type,
10469 /* data loaded, buffer no longer needed */
10470 kfree(cap_buf);
10472 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
10473 /* retry with a larger buffer */
10474 buf_len = data_size;
10475 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
10476 dev_info(&pf->pdev->dev,
10477 "capability discovery failed, err %pe aq_err %s\n",
10479 i40e_aq_str(&pf->hw,
10480 pf->hw.aq.asq_last_status));
10481 return -ENODEV;
10482 }
10483 } while (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM);
10485 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
10486 if (list_type == i40e_aqc_opc_list_func_capabilities) {
10487 dev_info(&pf->pdev->dev,
10488 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
10489 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
10490 pf->hw.func_caps.num_msix_vectors,
10491 pf->hw.func_caps.num_msix_vectors_vf,
10492 pf->hw.func_caps.fd_filters_guaranteed,
10493 pf->hw.func_caps.fd_filters_best_effort,
10494 pf->hw.func_caps.num_tx_qp,
10495 pf->hw.func_caps.num_vsis);
10496 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
10497 dev_info(&pf->pdev->dev,
10498 "switch_mode=0x%04x, function_valid=0x%08x\n",
10499 pf->hw.dev_caps.switch_mode,
10500 pf->hw.dev_caps.valid_functions);
10501 dev_info(&pf->pdev->dev,
10502 "SR-IOV=%d, num_vfs for all function=%u\n",
10503 pf->hw.dev_caps.sr_iov_1_1,
10504 pf->hw.dev_caps.num_vfs);
10505 dev_info(&pf->pdev->dev,
10506 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
10507 pf->hw.dev_caps.num_vsis,
10508 pf->hw.dev_caps.num_rx_qp,
10509 pf->hw.dev_caps.num_tx_qp);
10512 if (list_type == i40e_aqc_opc_list_func_capabilities) {
10513 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
10514 + pf->hw.func_caps.num_vfs)
10515 if (pf->hw.revision_id == 0 &&
10516 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
10517 dev_info(&pf->pdev->dev,
10518 "got num_vsis %d, setting num_vsis to %d\n",
10519 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
10520 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
10521 }
10522 }
10524 return 0;
10525 }
10526 static int i40e_vsi_clear(struct i40e_vsi *vsi);
10529 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
10530 * @pf: board private structure
10532 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
10534 struct i40e_vsi *vsi;
10536 /* quick workaround for an NVM issue that leaves a critical register
10537 * uninitialized
10538 */
10539 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
10540 static const u32 hkey[] = {
10541 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
10542 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
10543 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
10547 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
10548 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
10551 if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
10552 return;
10554 /* find existing VSI and see if it needs configuring */
10555 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10557 /* create a new VSI if none exists */
10558 if (!vsi) {
10559 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
10560 pf->vsi[pf->lan_vsi]->seid, 0);
10561 if (!vsi) {
10562 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
10563 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
10564 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
10565 return;
10566 }
10567 }
10569 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
10573 * i40e_fdir_teardown - release the Flow Director resources
10574 * @pf: board private structure
10576 static void i40e_fdir_teardown(struct i40e_pf *pf)
10578 struct i40e_vsi *vsi;
10580 i40e_fdir_filter_exit(pf);
10581 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10582 if (vsi)
10583 i40e_vsi_release(vsi);
10584 }
10587 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
10588 * @vsi: PF main vsi
10589 * @seid: seid of main or channel VSIs
10591 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
10592 * existed before reset
10594 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
10596 struct i40e_cloud_filter *cfilter;
10597 struct i40e_pf *pf = vsi->back;
10598 struct hlist_node *node;
10601 /* Add cloud filters back if they exist */
10602 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
10604 if (cfilter->seid != seid)
10605 continue;
10607 if (cfilter->dst_port)
10608 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
10611 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
10613 if (ret) {
10614 dev_dbg(&pf->pdev->dev,
10615 "Failed to rebuild cloud filter, err %pe aq_err %s\n",
10616 ERR_PTR(ret),
10617 i40e_aq_str(&pf->hw,
10618 pf->hw.aq.asq_last_status));
10619 return ret;
10620 }
10621 }
10622 return 0;
10623 }
10626 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
10627 * @vsi: PF main vsi
10629 * Rebuilds channel VSIs if they existed before reset
10631 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
10633 struct i40e_channel *ch, *ch_tmp;
10636 if (list_empty(&vsi->ch_list))
10637 return 0;
10639 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
10640 if (!ch->initialized)
10641 continue;
10642 /* Proceed with creation of channel (VMDq2) VSI */
10643 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
10644 if (ret) {
10645 dev_info(&vsi->back->pdev->dev,
10646 "failed to rebuild channels using uplink_seid %u\n",
10647 vsi->uplink_seid);
10648 return ret;
10649 }
10650 /* Reconfigure TX queues using QTX_CTL register */
10651 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
10652 if (ret) {
10653 dev_info(&vsi->back->pdev->dev,
10654 "failed to configure TX rings for channel %u\n",
10655 ch->seid);
10656 return ret;
10657 }
10658 /* update 'next_base_queue' */
10659 vsi->next_base_queue = vsi->next_base_queue +
10660 ch->num_queue_pairs;
10661 if (ch->max_tx_rate) {
10662 u64 credits = ch->max_tx_rate;
10664 if (i40e_set_bw_limit(vsi, ch->seid,
10665 ch->max_tx_rate))
10666 return -EINVAL;
10668 do_div(credits, I40E_BW_CREDIT_DIVISOR);
10669 dev_dbg(&vsi->back->pdev->dev,
10670 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10675 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
10676 if (ret) {
10677 dev_dbg(&vsi->back->pdev->dev,
10678 "Failed to rebuild cloud filters for channel VSI %u\n",
10679 ch->seid);
10680 return ret;
10681 }
10682 }
10683 return 0;
10684 }
10687 * i40e_clean_xps_state - clean xps state for every tx_ring
10688 * @vsi: ptr to the VSI
10690 static void i40e_clean_xps_state(struct i40e_vsi *vsi)
10695 for (i = 0; i < vsi->num_queue_pairs; i++)
10696 if (vsi->tx_rings[i])
10697 clear_bit(__I40E_TX_XPS_INIT_DONE,
				  vsi->tx_rings[i]->state);
}
10702 * i40e_prep_for_reset - prep for the core to reset
10703 * @pf: board private structure
10705 * Close up the VFs and other things in prep for PF Reset.
10707 static void i40e_prep_for_reset(struct i40e_pf *pf)
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;
	u32 v;
10713 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;
10716 if (i40e_check_asq_alive(&pf->hw))
10717 i40e_vc_notify_reset(pf);
10719 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
10721 /* quiesce the VSIs and their queues that are not already DOWN */
10722 i40e_pf_quiesce_all_vsi(pf);
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v]) {
			i40e_clean_xps_state(pf->vsi[v]);
			pf->vsi[v]->seid = 0;
		}
	}
10731 i40e_shutdown_adminq(&pf->hw);
10733 /* call shutdown HMC */
10734 if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}
	/* Save the current PTP time so that we can restore the time after the
	 * reset completes.
	 */
	i40e_ptp_save_hw_time(pf);
}
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
10751 static void i40e_send_version(struct i40e_pf *pf)
10753 struct i40e_driver_version dv;
10755 dv.major_version = 0xff;
10756 dv.minor_version = 0xff;
10757 dv.build_version = 0xff;
10758 dv.subbuild_version = 0;
10759 strscpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}
10764 * i40e_get_oem_version - get OEM specific version information
10765 * @hw: pointer to the hardware structure
10767 static void i40e_get_oem_version(struct i40e_hw *hw)
	u16 block_offset = 0xffff;
	u16 block_length = 0;
	u16 capabilities = 0;
	u16 gen_snap = 0;
	u16 release = 0;
10775 #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
10776 #define I40E_NVM_OEM_LENGTH_OFFSET 0x00
10777 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
10778 #define I40E_NVM_OEM_GEN_OFFSET 0x02
10779 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03
10780 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
10781 #define I40E_NVM_OEM_LENGTH 3
10783 /* Check if pointer to OEM version block is valid. */
	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
	if (block_offset == 0xffff)
		return;
10788 /* Check if OEM version block has correct length. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
			   &block_length);
	if (block_length < I40E_NVM_OEM_LENGTH)
		return;
10794 /* Check if OEM version format is as expected. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
			   &capabilities);
	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
		return;
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
			   &gen_snap);
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
			   &release);
	hw->nvm.oem_ver =
		FIELD_PREP(I40E_OEM_GEN_MASK | I40E_OEM_SNAP_MASK, gen_snap) |
		FIELD_PREP(I40E_OEM_RELEASE_MASK, release);
	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
}
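/* Illustrative sketch (compiled out, hypothetical helper): how FIELD_PREP()
 * packs the OEM generation/snapshot and release NVM words into the single
 * 32-bit oem_ver above, and how FIELD_GET() recovers a field again. The
 * masks are the I40E_OEM_*_MASK definitions used by i40e_get_oem_version().
 */
#ifdef I40E_DOC_EXAMPLES
static u32 i40e_example_pack_oem_ver(u16 gen_snap, u16 release)
{
	u32 oem_ver =
		FIELD_PREP(I40E_OEM_GEN_MASK | I40E_OEM_SNAP_MASK, gen_snap) |
		FIELD_PREP(I40E_OEM_RELEASE_MASK, release);

	/* round trip: FIELD_GET() undoes FIELD_PREP() for the same mask */
	WARN_ON(FIELD_GET(I40E_OEM_RELEASE_MASK, oem_ver) != release);
	return oem_ver;
}
#endif /* I40E_DOC_EXAMPLES */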
 * i40e_reset - wait for core reset to finish; reset the PF if a core reset is not seen
10812 * @pf: board private structure
10814 static int i40e_reset(struct i40e_pf *pf)
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, pf->state);
		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	}
	return ret;
}
10831 * i40e_rebuild - rebuild using a saved config
10832 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
10834 * @lock_acquired: indicates whether or not the lock has been acquired
10835 * before this function was called.
10837 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
10839 const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
10840 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	int ret;
	u32 val;
	int v;
10846 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
10847 is_recovery_mode_reported)
10848 i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
10850 if (test_bit(__I40E_DOWN, pf->state) &&
10851 !test_bit(__I40E_RECOVERY_MODE, pf->state))
10852 goto clear_recovery;
10853 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
10855 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}
10863 i40e_get_oem_version(&pf->hw);
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
		/* The following delay is necessary for firmware update. */
		mdelay(1000);
	}
10870 /* re-verify the eeprom if we just had an EMP reset */
10871 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
10872 i40e_verify_eeprom(pf);
	/* if we are going out of or into recovery mode we have to act
	 * accordingly with regard to resource initialization
	 * and deinitialization
	 */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
		if (i40e_get_capabilities(pf,
					  i40e_aqc_opc_list_func_capabilities))
			goto end_unlock;

		if (is_recovery_mode_reported) {
			/* we're staying in recovery mode so we'll reinitialize
			 * misc vector here
			 */
			if (i40e_setup_misc_vector_for_recovery_mode(pf))
				goto end_unlock;
		} else {
			if (!lock_acquired)
				rtnl_lock();
			/* we're going out of recovery mode so we'll free
			 * the IRQ allocated specifically for recovery mode
			 * and restore the interrupt scheme
			 */
			free_irq(pf->pdev->irq, pf);
			i40e_clear_interrupt_scheme(pf);
			if (i40e_restore_interrupt_scheme(pf))
				goto end_unlock;
		}

		/* tell the firmware that we're starting */
		i40e_send_version(pf);

		/* bail out in case recovery mode was detected, as there is
		 * no need for further configuration.
		 */
		goto end_unlock;
	}
10911 i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (ret)
		goto end_core_reset;
	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
10928 #ifdef CONFIG_I40E_DCB
10929 /* Enable FW to write a default DCB config on link-up
10930 * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
	 * is not supported with new link speed
	 */
	if (i40e_is_tc_mqprio_enabled(pf)) {
		i40e_aq_set_dcb_parameters(hw, false, NULL);
	} else {
		if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
		    (hw->phy.link_info.link_speed &
		     (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
			i40e_aq_set_dcb_parameters(hw, false, NULL);
			dev_warn(&pf->pdev->dev,
				 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
			clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
		} else {
			i40e_aq_set_dcb_parameters(hw, true, NULL);
			ret = i40e_init_pf_dcb(pf);
			if (ret) {
				dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
					 ret);
				clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
				/* Continue without DCB enabled */
			}
		}
	}
10955 #endif /* CONFIG_I40E_DCB */
	if (!lock_acquired)
		rtnl_lock();
	ret = i40e_setup_pf_switch(pf, reinit, true);
	if (ret)
		goto end_unlock;
10962 /* The driver only wants link up/down and module qualification
10963 * reports from firmware. Note the negative logic.
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
10966 ~(I40E_AQ_EVENT_LINK_UPDOWN |
10967 I40E_AQ_EVENT_MEDIA_NA |
10968 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
10970 dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
10972 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10974 /* Rebuild the VSIs and VEBs that existed before reset.
10975 * They are still in our local switch element arrays, so only
10976 * need to rebuild the switch model in the HW.
10978 * If there were VEBs but the reconstitution failed, we'll try
10979 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (vsi->uplink_seid != pf->mac_seid) {
10982 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
10983 /* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					vsi->uplink_seid = pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}
11016 if (vsi->uplink_seid == pf->mac_seid) {
11017 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
11018 /* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(vsi);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_unlock;
		}
	}
	if (vsi->mqprio_qopt.max_rate[0]) {
		u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
						vsi->mqprio_qopt.max_rate[0]);
		u64 credits = 0;

		ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
		if (ret)
			goto end_unlock;

		credits = max_tx_rate;
		do_div(credits, I40E_BW_CREDIT_DIVISOR);
		dev_dbg(&vsi->back->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			max_tx_rate, credits, vsi->seid);
	}
	ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
	if (ret)
		goto end_unlock;
	/* PF Main VSI is rebuilt by now, go ahead and rebuild channel VSIs
	 * for this main VSI if they exist
	 */
	ret = i40e_rebuild_channels(vsi);
	if (ret)
		goto end_unlock;
11056 /* Reconfigure hardware for allowing smaller MSS in the case
11057 * of TSO, so that we avoid the MDD being fired and causing
11058 * a reset in the case of small MSS+TSO.
	 */
#define I40E_REG_MSS		0x000E64DC
11061 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
11062 #define I40E_64BYTE_MSS 0x400000
11063 val = rd32(hw, I40E_REG_MSS);
11064 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
11065 val &= ~I40E_REG_MSS_MIN_MASK;
11066 val |= I40E_64BYTE_MSS;
11067 wr32(hw, I40E_REG_MSS, val);
	if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) {
		msleep(75);
		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (ret)
			dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
				 ERR_PTR(ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
11079 /* reinit the misc interrupt */
	if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
		ret = i40e_setup_misc_vector(pf);
		if (ret)
			goto end_unlock;
	}
	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
11092 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
11093 pf->main_vsi_seid);
11095 /* restart the VSIs that were rebuilt and running before the reset */
11096 i40e_pf_unquiesce_all_vsi(pf);
11098 /* Release the RTNL lock before we start resetting VFs */
	if (!lock_acquired)
		rtnl_unlock();
11102 /* Restore promiscuous settings */
	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
	if (ret)
		dev_warn(&pf->pdev->dev,
			 "Failed to restore promiscuous setting: %s, err %pe aq_err %s\n",
			 pf->cur_promisc ? "on" : "off",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11111 i40e_reset_all_vfs(pf, true);
11113 /* tell the firmware that we're starting */
11114 i40e_send_version(pf);
11116 /* We've already released the lock, so don't do it again */
11117 goto end_core_reset;
end_unlock:
	if (!lock_acquired)
		rtnl_unlock();
end_core_reset:
	clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
}
11130 * i40e_reset_and_rebuild - reset and rebuild using a saved config
11131 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
11133 * @lock_acquired: indicates whether or not the lock has been acquired
11134 * before this function was called.
11136 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
11137 bool lock_acquired)
	int ret;

	if (test_bit(__I40E_IN_REMOVE, pf->state))
		return;
11143 /* Now we wait for GRST to settle out.
11144 * We don't have to delete the VEBs or VSIs from the hw switch
11145 * because the reset will make them disappear.
	ret = i40e_reset(pf);
	if (!ret)
		i40e_rebuild(pf, reinit, lock_acquired);
}
11153 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
11154 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
11161 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
11163 i40e_prep_for_reset(pf);
11164 i40e_reset_and_rebuild(pf, false, lock_acquired);
11168 * i40e_handle_mdd_event
11169 * @pf: pointer to the PF structure
 * Called from the MDD irq handler to identify possibly malicious VFs
11173 static void i40e_handle_mdd_event(struct i40e_pf *pf)
11175 struct i40e_hw *hw = &pf->hw;
11176 bool mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;
	if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
		return;
11184 /* find what triggered the MDD event */
11185 reg = rd32(hw, I40E_GL_MDET_TX);
11186 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
11187 u8 pf_num = FIELD_GET(I40E_GL_MDET_TX_PF_NUM_MASK, reg);
11188 u16 vf_num = FIELD_GET(I40E_GL_MDET_TX_VF_NUM_MASK, reg);
11189 u8 event = FIELD_GET(I40E_GL_MDET_TX_EVENT_MASK, reg);
11190 u16 queue = FIELD_GET(I40E_GL_MDET_TX_QUEUE_MASK, reg) -
11191 pf->hw.func_caps.base_queue;
11192 if (netif_msg_tx_err(pf))
11193 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
11194 event, queue, pf_num, vf_num);
11195 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
11198 reg = rd32(hw, I40E_GL_MDET_RX);
11199 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
11200 u8 func = FIELD_GET(I40E_GL_MDET_RX_FUNCTION_MASK, reg);
11201 u8 event = FIELD_GET(I40E_GL_MDET_RX_EVENT_MASK, reg);
11202 u16 queue = FIELD_GET(I40E_GL_MDET_RX_QUEUE_MASK, reg) -
11203 pf->hw.func_caps.base_queue;
11204 if (netif_msg_rx_err(pf))
11205 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
11206 event, queue, func);
11207 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}
11211 if (mdd_detected) {
11212 reg = rd32(hw, I40E_PF_MDET_TX);
11213 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
11214 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
11215 dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
11217 reg = rd32(hw, I40E_PF_MDET_RX);
11218 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
11219 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
11220 dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
11224 /* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
11228 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
11229 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
11230 vf->num_mdd_events++;
11231 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
11233 dev_info(&pf->pdev->dev,
11234 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
11239 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
11240 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
11241 vf->num_mdd_events++;
11242 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
11244 dev_info(&pf->pdev->dev,
11245 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	}
11250 /* re-enable mdd interrupt cause */
11251 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}
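/* Illustrative sketch (compiled out, hypothetical helper): decoding one MDD
 * cause register with FIELD_GET(), mirroring the Tx path above. GL_MDET_TX
 * packs the PF number, VF number, event code and queue index into a single
 * register; the queue index is relative to the function's base queue.
 */
#ifdef I40E_DOC_EXAMPLES
static void i40e_example_decode_mdet_tx(struct i40e_pf *pf, u32 reg)
{
	u8 pf_num = FIELD_GET(I40E_GL_MDET_TX_PF_NUM_MASK, reg);
	u16 vf_num = FIELD_GET(I40E_GL_MDET_TX_VF_NUM_MASK, reg);
	u8 event = FIELD_GET(I40E_GL_MDET_TX_EVENT_MASK, reg);
	u16 queue = FIELD_GET(I40E_GL_MDET_TX_QUEUE_MASK, reg) -
		    pf->hw.func_caps.base_queue;

	dev_dbg(&pf->pdev->dev, "MDD event 0x%02x on TX queue %u PF %u VF %u\n",
		event, queue, pf_num, vf_num);
}
#endif /* I40E_DOC_EXAMPLES */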
11259 * i40e_service_task - Run the driver's async subtasks
11260 * @work: pointer to work_struct containing our data
11262 static void i40e_service_task(struct work_struct *work)
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
11267 unsigned long start_time = jiffies;
11269 /* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
	    test_bit(__I40E_SUSPENDED, pf->state))
		return;

	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
		return;
11277 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
11278 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
11279 i40e_sync_filters_subtask(pf);
11280 i40e_reset_subtask(pf);
11281 i40e_handle_mdd_event(pf);
11282 i40e_vc_process_vflr_event(pf);
11283 i40e_watchdog_subtask(pf);
11284 i40e_fdir_reinit_subtask(pf);
		if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
			/* Client subtask will reopen next time through. */
			i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
							   true);
		} else {
			i40e_client_subtask(pf);
			if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
					       pf->state))
				i40e_notify_client_of_l2_param_changes(
							pf->vsi[pf->lan_vsi]);
		}
		i40e_sync_filters_subtask(pf);
	} else {
		i40e_reset_subtask(pf);
	}
11301 i40e_clean_adminq_subtask(pf);
11303 /* flush memory to make sure state is correct before next watchdog */
11304 smp_mb__before_atomic();
11305 clear_bit(__I40E_SERVICE_SCHED, pf->state);
11307 /* If the tasks have taken longer than one timer cycle or there
11308 * is more work to be done, reschedule the service task now
11309 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
11312 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
11313 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
11314 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		i40e_service_event_schedule(pf);
}
11319 * i40e_service_timer - timer callback
11320 * @t: timer list pointer
11322 static void i40e_service_timer(struct timer_list *t)
11324 struct i40e_pf *pf = from_timer(pf, t, service_timer);
11326 mod_timer(&pf->service_timer,
11327 round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}
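/* Illustrative sketch (compiled out, hypothetical init helper): the service
 * timer follows the standard timer_list pattern. timer_setup() binds the
 * callback, from_timer() in the callback recovers the enclosing i40e_pf, and
 * the callback re-arms itself with mod_timer(), as i40e_service_timer() does
 * above.
 */
#ifdef I40E_DOC_EXAMPLES
static void i40e_example_start_service_timer(struct i40e_pf *pf)
{
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
}
#endif /* I40E_DOC_EXAMPLES */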
11332 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
11333 * @vsi: the VSI being configured
11335 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
11337 struct i40e_pf *pf = vsi->back;
11339 switch (vsi->type) {
11340 case I40E_VSI_MAIN:
11341 vsi->alloc_queue_pairs = pf->num_lan_qps;
11342 if (!vsi->num_tx_desc)
11343 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11344 I40E_REQ_DESCRIPTOR_MULTIPLE);
11345 if (!vsi->num_rx_desc)
11346 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11347 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;
		break;
11355 case I40E_VSI_FDIR:
11356 vsi->alloc_queue_pairs = 1;
11357 vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
11358 I40E_REQ_DESCRIPTOR_MULTIPLE);
11359 vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
11360 I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fdsb_msix;
		break;
11364 case I40E_VSI_VMDQ2:
11365 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
11366 if (!vsi->num_tx_desc)
11367 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11368 I40E_REQ_DESCRIPTOR_MULTIPLE);
11369 if (!vsi->num_rx_desc)
11370 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11371 I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;
11375 case I40E_VSI_SRIOV:
11376 vsi->alloc_queue_pairs = pf->num_vf_qps;
11377 if (!vsi->num_tx_desc)
11378 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11379 I40E_REQ_DESCRIPTOR_MULTIPLE);
11380 if (!vsi->num_rx_desc)
11381 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11382 I40E_REQ_DESCRIPTOR_MULTIPLE);
11390 if (is_kdump_kernel()) {
11391 vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS;
		vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS;
	}

	return 0;
}
11399 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
11400 * @vsi: VSI pointer
11401 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
11403 * On error: returns error code (negative)
11404 * On success: returns 0
11406 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
	struct i40e_ring **next_rings;
	int size;
	int ret = 0;

	/* allocate memory for the Tx, XDP Tx and Rx ring pointers */
11413 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
11414 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
11415 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
11418 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
	if (i40e_enabled_xdp_vsi(vsi)) {
		vsi->xdp_rings = next_rings;
		next_rings += vsi->alloc_queue_pairs;
	}
	vsi->rx_rings = next_rings;
	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return 0;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}
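/* Illustrative sketch (compiled out, hypothetical helper): the single
 * kzalloc() above carves one pointer array into up to three consecutive
 * regions. With XDP enabled and alloc_queue_pairs = N the layout is
 *
 *	tx_rings[0..N-1] | xdp_rings[0..N-1] | rx_rings[0..N-1]
 *
 * so the lone kfree(vsi->tx_rings) releases all three regions at once.
 */
#ifdef I40E_DOC_EXAMPLES
static void i40e_example_ring_array_layout(struct i40e_vsi *vsi)
{
	struct i40e_ring **base = vsi->tx_rings;
	u16 n = vsi->alloc_queue_pairs;

	WARN_ON(i40e_enabled_xdp_vsi(vsi) && vsi->xdp_rings != base + n);
	WARN_ON(vsi->rx_rings !=
		base + (i40e_enabled_xdp_vsi(vsi) ? 2 * n : n));
}
#endif /* I40E_DOC_EXAMPLES */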
11442 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
11443 * @pf: board private structure
11444 * @type: type of VSI
11446 * On error: returns error code (negative)
11447 * On success: returns vsi index in PF (positive)
11449 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;
11456 /* Need to protect the allocation of the VSIs at the PF level */
11457 mutex_lock(&pf->switch_mutex);
11459 /* VSI list may be fragmented if VSI creation/destruction has
11460 * been happening. We can afford to do a quick scan to look
11461 * for any free VSIs in the list.
11463 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i; /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf; /* out of VSI slots! */
	}
	pf->next_vsi = ++i;
	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_VSI_DOWN, vsi->state);
	vsi->flags = 0;
11491 vsi->idx = vsi_idx;
11492 vsi->int_rate_limit = 0;
11493 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
11494 pf->rss_table_size : 64;
11495 vsi->netdev_registered = false;
11496 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
11497 hash_init(vsi->mac_filter_hash);
11498 vsi->irqs_ready = false;
11500 if (type == I40E_VSI_MAIN) {
11501 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
		if (!vsi->af_xdp_zc_qps)
			goto err_rings;
	}
	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;
	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;
11514 /* Setup default MSIX irq handler for VSI */
11515 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
11517 /* Initialize VSI lock */
11518 spin_lock_init(&vsi->mac_filter_hash_lock);
	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	bitmap_free(vsi->af_xdp_zc_qps);
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
11533 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
11534 * @vsi: VSI pointer
11535 * @free_qvectors: a bool to specify if q_vectors need to be freed.
11540 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
11542 /* free the ring and vector containers */
11543 if (free_qvectors) {
11544 kfree(vsi->q_vectors);
11545 vsi->q_vectors = NULL;
11547 kfree(vsi->tx_rings);
11548 vsi->tx_rings = NULL;
11549 vsi->rx_rings = NULL;
11550 vsi->xdp_rings = NULL;
 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
 * and lookup table
 * @vsi: Pointer to VSI structure
11558 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
11563 kfree(vsi->rss_hkey_user);
11564 vsi->rss_hkey_user = NULL;
11566 kfree(vsi->rss_lut_user);
11567 vsi->rss_lut_user = NULL;
11571 * i40e_vsi_clear - Deallocate the VSI provided
11572 * @vsi: the VSI being un-configured
11574 static int i40e_vsi_clear(struct i40e_vsi *vsi)
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	if (!vsi->back)
		goto free_vsi;

	pf = vsi->back;
11585 mutex_lock(&pf->switch_mutex);
11586 if (!pf->vsi[vsi->idx]) {
11587 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
			vsi->idx, vsi->idx, vsi->type);
		goto unlock_vsi;
	}
11592 if (pf->vsi[vsi->idx] != vsi) {
11593 dev_err(&pf->pdev->dev,
11594 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
11595 pf->vsi[vsi->idx]->idx,
11596 pf->vsi[vsi->idx]->type,
			vsi->idx, vsi->type);
		goto unlock_vsi;
	}
11601 /* updates the PF for this cleared vsi */
11602 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
11603 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
11605 bitmap_free(vsi->af_xdp_zc_qps);
11606 i40e_vsi_free_arrays(vsi, true);
11607 i40e_clear_rss_config_user(vsi);
11609 pf->vsi[vsi->idx] = NULL;
11610 if (vsi->idx < pf->next_vsi)
11611 pf->next_vsi = vsi->idx;
unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}
11622 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
11623 * @vsi: the VSI being cleaned
11625 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
	int i;

	if (vsi->tx_rings && vsi->tx_rings[0]) {
11630 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
11631 kfree_rcu(vsi->tx_rings[i], rcu);
11632 WRITE_ONCE(vsi->tx_rings[i], NULL);
11633 WRITE_ONCE(vsi->rx_rings[i], NULL);
11634 if (vsi->xdp_rings)
				WRITE_ONCE(vsi->xdp_rings[i], NULL);
		}
	}
}
11641 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
11642 * @vsi: the VSI being configured
11644 static int i40e_alloc_rings(struct i40e_vsi *vsi)
11646 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
11647 struct i40e_pf *pf = vsi->back;
11648 struct i40e_ring *ring;
11650 /* Set basic values in the rings to be used later during open() */
11651 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
11652 /* allocate space for both Tx and Rx in one shot */
		ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
		if (!ring)
			goto err;
11657 ring->queue_index = i;
11658 ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
11661 ring->netdev = vsi->netdev;
11662 ring->dev = &pf->pdev->dev;
11663 ring->count = vsi->num_tx_desc;
11666 if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps))
11667 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
11668 ring->itr_setting = pf->tx_itr_default;
11669 WRITE_ONCE(vsi->tx_rings[i], ring++);
		if (!i40e_enabled_xdp_vsi(vsi))
			goto setup_rx;
11674 ring->queue_index = vsi->alloc_queue_pairs + i;
11675 ring->reg_idx = vsi->base_queue + ring->queue_index;
		ring->ring_active = false;
		ring->vsi = vsi;
11678 ring->netdev = NULL;
11679 ring->dev = &pf->pdev->dev;
11680 ring->count = vsi->num_tx_desc;
11683 if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps))
11684 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
11685 set_ring_xdp(ring);
11686 ring->itr_setting = pf->tx_itr_default;
11687 WRITE_ONCE(vsi->xdp_rings[i], ring++);
setup_rx:
		ring->queue_index = i;
11691 ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
11694 ring->netdev = vsi->netdev;
11695 ring->dev = &pf->pdev->dev;
11696 ring->count = vsi->num_rx_desc;
11699 ring->itr_setting = pf->rx_itr_default;
		WRITE_ONCE(vsi->rx_rings[i], ring);
	}

	return 0;

err:
	i40e_vsi_clear_rings(vsi);
	return -ENOMEM;
}
11711 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
11712 * @pf: board private structure
11713 * @vectors: the number of MSI-X vectors to request
11715 * Returns the number of vectors reserved, or error
11717 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					I40E_MIN_MSIX, vectors);
	if (vectors < 0) {
		dev_info(&pf->pdev->dev,
			 "MSI-X vector reservation failed: %d\n", vectors);
		vectors = 0;
	}

	return vectors;
}
11731 * i40e_init_msix - Setup the MSIX capability
11732 * @pf: board private structure
11734 * Work with the OS to set up the MSIX vectors needed.
11736 * Returns the number of vectors reserved or negative on failure
11738 static int i40e_init_msix(struct i40e_pf *pf)
11740 struct i40e_hw *hw = &pf->hw;
	int cpus, extra_vectors;
	int vectors_left;
	int v_budget, i;
	int v_actual;
	int iwarp_requested = 0;
	if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
		return -ENODEV;
11750 /* The number of vectors we'll request will be comprised of:
11751 * - Add 1 for "other" cause for Admin Queue events, etc.
11752 * - The number of LAN queue pairs
	 * - Queues being used for RSS.
	 *   We don't need as many as max_rss_size vectors;
	 *   use rss_size instead in the calculation, since that
	 *   is governed by the number of CPUs in the system.
11757 * - assumes symmetric Tx/Rx pairing
11758 * - The number of VMDq pairs
11759 * - The CPU count within the NUMA node if iWARP is enabled
11760 * Once we count this up, try the request.
11762 * If we can't get what we want, we'll simplify to nearly nothing
11763 * and try again. If that still fails, we punt.
	 */
	vectors_left = hw->func_caps.num_msix_vectors;
	v_budget = 0;

	/* reserve one vector for miscellaneous handler */
	if (vectors_left) {
		v_budget++;
		vectors_left--;
	}
11774 /* reserve some vectors for the main PF traffic queues. Initially we
11775 * only reserve at most 50% of the available vectors, in the case that
11776 * the number of online CPUs is large. This ensures that we can enable
11777 * extra features as well. Once we've enabled the other features, we
11778 * will use any remaining vectors to reach as close as we can to the
11779 * number of online CPUs.
11781 cpus = num_online_cpus();
11782 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
11783 vectors_left -= pf->num_lan_msix;
11785 /* reserve one vector for sideband flow director */
	if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
		if (vectors_left) {
			pf->num_fdsb_msix = 1;
			v_budget++;
			vectors_left--;
		} else {
			pf->num_fdsb_msix = 0;
		}
	}
11796 /* can we reserve enough for iWARP? */
11797 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
		iwarp_requested = pf->num_iwarp_msix;

		if (!vectors_left)
			pf->num_iwarp_msix = 0;
		else if (vectors_left < pf->num_iwarp_msix)
			pf->num_iwarp_msix = 1;
		v_budget += pf->num_iwarp_msix;
		vectors_left -= pf->num_iwarp_msix;
	}
11808 /* any vectors left over go for VMDq support */
	if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags)) {
		if (!vectors_left) {
			pf->num_vmdq_msix = 0;
			pf->num_vmdq_qps = 0;
		} else {
			int vmdq_vecs_wanted =
				pf->num_vmdq_vsis * pf->num_vmdq_qps;
			int vmdq_vecs =
				min_t(int, vectors_left, vmdq_vecs_wanted);

			/* if we're short on vectors for what's desired, we limit
			 * the queues per vmdq. If this is still more than are
			 * available, the user will need to change the number of
			 * queues/vectors used by the PF later with the ethtool
			 * channels command
			 */
			if (vectors_left < vmdq_vecs_wanted) {
				pf->num_vmdq_qps = 1;
				vmdq_vecs_wanted = pf->num_vmdq_vsis;
				vmdq_vecs = min_t(int,
						  vectors_left,
						  vmdq_vecs_wanted);
			}
			pf->num_vmdq_msix = pf->num_vmdq_qps;

			v_budget += vmdq_vecs;
			vectors_left -= vmdq_vecs;
		}
	}
11839 /* On systems with a large number of SMP cores, we previously limited
11840 * the number of vectors for num_lan_msix to be at most 50% of the
11841 * available vectors, to allow for other features. Now, we add back
11842 * the remaining vectors. However, we ensure that the total
11843 * num_lan_msix will not exceed num_online_cpus(). To do this, we
11844 * calculate the number of vectors we can add without going over the
	 * cap of CPUs. For systems with a small number of CPUs this will be
	 * zero.
	 */
11848 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
11849 pf->num_lan_msix += extra_vectors;
11850 vectors_left -= extra_vectors;
11852 WARN(vectors_left < 0,
11853 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
11855 v_budget += pf->num_lan_msix;
11856 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
	if (!pf->msix_entries)
		return -ENOMEM;
11861 for (i = 0; i < v_budget; i++)
11862 pf->msix_entries[i].entry = i;
11863 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
11865 if (v_actual < I40E_MIN_MSIX) {
11866 clear_bit(I40E_FLAG_MSIX_ENA, pf->flags);
11867 kfree(pf->msix_entries);
11868 pf->msix_entries = NULL;
		pci_disable_msix(pf->pdev);
		return -ENODEV;

	} else if (v_actual == I40E_MIN_MSIX) {
11873 /* Adjust for minimal MSIX use */
11874 pf->num_vmdq_vsis = 0;
11875 pf->num_vmdq_qps = 0;
11876 pf->num_lan_qps = 1;
11877 pf->num_lan_msix = 1;
11879 } else if (v_actual != v_budget) {
11880 /* If we have limited resources, we will start with no vectors
11881 * for the special features and then allocate vectors to some
11882 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
		int vec;

11887 dev_info(&pf->pdev->dev,
11888 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
11889 v_actual, v_budget);
11890 /* reserve the misc vector */
11891 vec = v_actual - 1;
11893 /* Scale vector usage down */
11894 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
11895 pf->num_vmdq_vsis = 1;
11896 pf->num_vmdq_qps = 1;
		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
			if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
				pf->num_lan_msix = 1;
				pf->num_iwarp_msix = 1;
			} else {
				pf->num_lan_msix = 2;
			}
			break;
		default:
			if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
				pf->num_iwarp_msix = min_t(int, (vec / 3),
							   iwarp_requested);
				pf->num_vmdq_vsis = min_t(int, (vec / 3),
							  I40E_DEFAULT_NUM_VMDQ_VSI);
			} else {
				pf->num_vmdq_vsis = min_t(int, (vec / 2),
							  I40E_DEFAULT_NUM_VMDQ_VSI);
			}
			if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
				pf->num_fdsb_msix = 1;
				vec--;
			}
			pf->num_lan_msix = min_t(int,
				(vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
				pf->num_lan_msix);
			pf->num_lan_qps = pf->num_lan_msix;
			break;
		}
	}
11933 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && pf->num_fdsb_msix == 0) {
11934 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
11935 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
		set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
	}
11938 if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) && pf->num_vmdq_msix == 0) {
11939 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
	}
11943 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) &&
11944 pf->num_iwarp_msix == 0) {
11945 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
		clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
	}
	i40e_debug(&pf->hw, I40E_DEBUG_INIT,
		   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
		   pf->num_lan_msix,
		   pf->num_vmdq_msix * pf->num_vmdq_vsis,
		   pf->num_fdsb_msix,
		   pf->num_iwarp_msix);

	return v_actual;
}
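/* Worked example (illustrative, compiled out, hypothetical helper): with 64
 * MSI-X vectors and 16 online CPUs, and assuming FD-SB is enabled while
 * iWARP and VMDq are not, the budgeting above reserves 1 misc vector, then
 * min(16, 63 / 2) = 16 LAN vectors, then 1 FD-SB vector; the add-back step
 * cannot raise the LAN count past the CPU count, so it stays at 16.
 */
#ifdef I40E_DOC_EXAMPLES
static int i40e_example_lan_vector_budget(int num_msix, int cpus)
{
	int vectors_left = num_msix - 1;	/* misc vector */
	int lan = min(cpus, vectors_left / 2);	/* initial 50% cap */

	vectors_left -= lan + 1;		/* LAN plus one FD-SB vector */
	/* add back what we can without exceeding the CPU count */
	lan += min(cpus - lan, vectors_left);
	return lan;
}
#endif /* I40E_DOC_EXAMPLES */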
11959 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
11960 * @vsi: the VSI being configured
11961 * @v_idx: index of the vector in the vsi struct
11963 * We allocate one q_vector. If allocation fails we return -ENOMEM.
11965 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
11967 struct i40e_q_vector *q_vector;
11969 /* allocate q_vector */
	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;
11974 q_vector->vsi = vsi;
11975 q_vector->v_idx = v_idx;
11976 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll);
11981 /* tie q_vector and vsi together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}
11988 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
11989 * @vsi: the VSI being configured
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
11994 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
11996 struct i40e_pf *pf = vsi->back;
11997 int err, v_idx, num_q_vectors;
11999 /* if not MSIX, give the one vector only to the LAN VSI */
12000 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
12001 num_q_vectors = vsi->num_q_vectors;
	else if (vsi == pf->vsi[pf->lan_vsi])
		num_q_vectors = 1;
	else
		return -EINVAL;
12007 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		err = i40e_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		i40e_free_q_vector(vsi, v_idx);

	return err;
}
12023 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
12024 * @pf: board private structure to initialize
12026 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
	int vectors = 0;
	ssize_t size;

	if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
		vectors = i40e_init_msix(pf);
		if (vectors < 0) {
			clear_bit(I40E_FLAG_MSIX_ENA, pf->flags);
12035 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
12036 clear_bit(I40E_FLAG_RSS_ENA, pf->flags);
12037 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
12038 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
12039 clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
12040 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
12041 clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
12042 clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
12043 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
12045 /* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}
12050 if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags) &&
12051 test_bit(I40E_FLAG_MSI_ENA, pf->flags)) {
12052 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		vectors = pci_enable_msi(pf->pdev);
		if (vectors < 0) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
				 vectors);
			clear_bit(I40E_FLAG_MSI_ENA, pf->flags);
		}
		vectors = 1; /* one MSI or Legacy vector */
	}
12062 if (!test_bit(I40E_FLAG_MSI_ENA, pf->flags) &&
12063 !test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
12064 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
12066 /* set up vector assignment tracking */
12067 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile)
		return -ENOMEM;

	pf->irq_pile->num_entries = vectors;
12074 /* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}
12081 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
12082 * @pf: private board data structure
12084 * Restore the interrupt scheme that was cleared when we suspended the
12085 * device. This should be called during resume to re-allocate the q_vectors
12086 * and reacquire IRQs.
12088 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
	int err, i;

	/* We cleared the MSI and MSI-X flags when disabling the old interrupt
	 * scheme. We need to re-enable them here in order to attempt to
12094 * re-acquire the MSI or MSI-X vectors
	 */
	set_bit(I40E_FLAG_MSI_ENA, pf->flags);
12097 set_bit(I40E_FLAG_MSIX_ENA, pf->flags);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		return err;
12103 /* Now that we've re-acquired IRQs, we need to remap the vectors and
12104 * rings together again.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
			if (err)
				goto err_unwind;

			i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
		}
	}
	err = i40e_setup_misc_vector(pf);
	if (err)
		goto err_unwind;
12119 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags))
		i40e_client_update_msix_info(pf);

	return 0;

err_unwind:
	while (i--) {
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	}

	return err;
}
12134 * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
12135 * non queue events in recovery mode
12136 * @pf: board private structure
12138 * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
12139 * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
 * This is handled differently from the non-recovery-mode path since
 * no Tx/Rx resources are being allocated.
12143 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
	int err;

	if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
		err = i40e_setup_misc_vector(pf);

		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSI-X misc vector request failed, error %d\n",
				 err);
			return err;
		}
	} else {
		u32 flags = test_bit(I40E_FLAG_MSI_ENA, pf->flags) ? 0 : IRQF_SHARED;

		err = request_irq(pf->pdev->irq, i40e_intr, flags,
				  pf->int_name, pf);

		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSI/legacy misc vector request failed, error %d\n",
				 err);
			return err;
		}
		i40e_enable_misc_int_causes(pf);
		i40e_irq_dynamic_enable_icr0(pf);
	}

	return 0;
}
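/* Design note (illustrative, compiled out, hypothetical helper): an MSI
 * vector is owned exclusively, so request_irq() can pass flags of 0, while
 * a legacy INTx line may be shared with other devices and must pass
 * IRQF_SHARED. That is why the branch above derives the flags from the MSI
 * capability bit.
 */
#ifdef I40E_DOC_EXAMPLES
static unsigned long i40e_example_misc_irq_flags(struct i40e_pf *pf)
{
	return test_bit(I40E_FLAG_MSI_ENA, pf->flags) ? 0 : IRQF_SHARED;
}
#endif /* I40E_DOC_EXAMPLES */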
12176 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
12177 * @pf: board private structure
12179 * This sets up the handler for MSIX 0, which is used to manage the
12180 * non-queue interrupts, e.g. AdminQ and errors. This is not used
12181 * when in MSI or Legacy interrupt mode.
12183 static int i40e_setup_misc_vector(struct i40e_pf *pf)
	struct i40e_hw *hw = &pf->hw;
	int err = 0;
12188 /* Only request the IRQ once, the first time through. */
12189 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
12190 err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}
12201 i40e_enable_misc_int_causes(pf);
12203 /* associate no queues to the misc vector */
12204 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), I40E_ITR_8K >> 1);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}
12215 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
12216 * @vsi: Pointer to vsi structure
 * @seed: Buffer to store the hash keys
12218 * @lut: Buffer to store the lookup table entries
12219 * @lut_size: Size of buffer to store the lookup table entries
12221 * Return 0 on success, negative on failure
12223 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
12224 u8 *lut, u16 lut_size)
12226 struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		ret = i40e_aq_get_rss_key(hw, vsi->id,
			(struct i40e_aqc_get_set_rss_key_data *)seed);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS key, err %pe aq_err %s\n",
				 ERR_PTR(ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS lut, err %pe aq_err %s\n",
				 ERR_PTR(ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	return ret;
}
12261 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
12262 * @vsi: Pointer to vsi structure
12263 * @seed: RSS hash seed
12264 * @lut: Lookup table
12265 * @lut_size: Lookup table size
12267 * Returns 0 on success, negative on failure
12269 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
12270 const u8 *lut, u16 lut_size)
12272 struct i40e_pf *pf = vsi->back;
12273 struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	/* Fill out hash function seed */
	if (seed) {
		u32 *seed_dw = (u32 *)seed;
12281 if (vsi->type == I40E_VSI_MAIN) {
12282 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
12283 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
12284 } else if (vsi->type == I40E_VSI_SRIOV) {
12285 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
		}
	}
	if (lut) {
		u32 *lut_dw = (u32 *)lut;
12295 if (vsi->type == I40E_VSI_MAIN) {
			if (lut_size != I40E_HLUT_ARRAY_SIZE)
				return -EINVAL;
12298 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12299 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
12300 } else if (vsi->type == I40E_VSI_SRIOV) {
			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
				return -EINVAL;
12303 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
		}
	}
	i40e_flush(hw);

	return 0;
}
12315 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
12316 * @vsi: Pointer to VSI structure
12317 * @seed: Buffer to store the keys
12318 * @lut: Buffer to store the lookup table entries
12319 * @lut_size: Size of buffer to store the lookup table entries
12321 * Returns 0 on success, negative on failure
12323 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
12324 u8 *lut, u16 lut_size)
12326 struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 i;

	if (seed) {
		u32 *seed_dw = (u32 *)seed;
12333 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
12334 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
	}
	if (lut) {
		u32 *lut_dw = (u32 *)lut;
		if (lut_size != I40E_HLUT_ARRAY_SIZE)
			return -EINVAL;
12341 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
	}

	return 0;
}
12349 * i40e_config_rss - Configure RSS keys and lut
12350 * @vsi: Pointer to VSI structure
12351 * @seed: RSS hash seed
12352 * @lut: Lookup table
12353 * @lut_size: Lookup table size
12355 * Returns 0 on success, negative on failure
12357 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
12359 struct i40e_pf *pf = vsi->back;
12361 if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
12362 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
}
12368 * i40e_get_rss - Get RSS keys and lut
12369 * @vsi: Pointer to VSI structure
12370 * @seed: Buffer to store the keys
12371 * @lut: Buffer to store the lookup table entries
12372 * @lut_size: Size of buffer to store the lookup table entries
12374 * Returns 0 on success, negative on failure
12376 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
12378 struct i40e_pf *pf = vsi->back;
12380 if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
12381 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
}
12387 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
12388 * @pf: Pointer to board private structure
12389 * @lut: Lookup table
12390 * @rss_table_size: Lookup table size
12391 * @rss_size: Range of queue number for hashing
12393 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
12394 u16 rss_table_size, u16 rss_size)
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}
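/* Worked example (illustrative, compiled out, hypothetical helper): with
 * rss_table_size = 16 and rss_size = 4 the default LUT built above is
 * 0,1,2,3,0,1,2,3,... so hash buckets are spread round-robin across the
 * four enabled queues. The pf argument is unused by i40e_fill_rss_lut().
 */
#ifdef I40E_DOC_EXAMPLES
static void i40e_example_default_lut(void)
{
	u8 lut[16];
	int i;

	i40e_fill_rss_lut(NULL, lut, 16, 4);
	for (i = 0; i < 16; i++)
		WARN_ON(lut[i] != i % 4);
}
#endif /* I40E_DOC_EXAMPLES */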
12403 * i40e_pf_config_rss - Prepare for RSS if used
12404 * @pf: board private structure
12406 static int i40e_pf_config_rss(struct i40e_pf *pf)
12408 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
12409 u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_val;
	u64 hena;
	int ret;
12416 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
12417 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
12418 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
12419 hena |= i40e_pf_get_default_rss_hena(pf);
12421 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
12422 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
12424 /* Determine the RSS table size based on the hardware capabilities */
12425 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
12426 reg_val = (pf->rss_table_size == 512) ?
12427 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
12428 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
12429 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
12431 /* Determine the RSS size of the VSI */
	if (!vsi->rss_size) {
		u16 qcount;
		/* If the firmware does something weird during VSI init, we
		 * could end up with zero TCs. Check for that to avoid
		 * divide-by-zero. It probably won't pass traffic, but it also
		 * won't panic.
		 */
		qcount = vsi->num_queue_pairs /
			 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
	}
	if (!vsi->rss_size)
		return -EINVAL;
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;
12450 /* Use user configured lut if there is one, otherwise use default */
12451 if (vsi->rss_lut_user)
12452 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
12459 if (vsi->rss_hkey_user)
12460 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}
12470 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
12471 * @pf: board private structure
12472 * @queue_count: the requested queue count for rss.
12474 * returns 0 if rss is not enabled, if enabled returns the final rss queue
12475 * count which may be different from the requested queue count.
12476 * Note: expects to be called while under rtnl_lock()
12478 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
12480 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	int new_rss_size;

	if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags))
		return 0;
12486 queue_count = min_t(int, queue_count, num_online_cpus());
12487 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
12489 if (queue_count != vsi->num_queue_pairs) {
		u16 qcount;

		vsi->req_queue_pairs = queue_count;
12493 i40e_prep_for_reset(pf);
12494 if (test_bit(__I40E_IN_REMOVE, pf->state))
12495 return pf->alloc_rss_size;
12497 pf->alloc_rss_size = new_rss_size;
12499 i40e_reset_and_rebuild(pf, true, true);
12501 /* Discard the user configured hash keys and lut, if less
12502 * queues are enabled.
		 */
		if (queue_count < vsi->rss_size) {
			i40e_clear_rss_config_user(vsi);
			dev_dbg(&pf->pdev->dev,
				"discard user configured hash keys and lut\n");
		}
12510 /* Reset vsi->rss_size, as number of enabled queues changed */
12511 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
12512 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
		i40e_pf_config_rss(pf);
	}
12516 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
12517 vsi->req_queue_pairs, pf->rss_size_max);
	return pf->alloc_rss_size;
}
12522 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
12523 * @pf: board private structure
12525 int i40e_get_partition_bw_setting(struct i40e_pf *pf)
	bool min_valid, max_valid;
	u32 max_bw, min_bw;
	int status;

	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
					   &min_valid, &max_valid);

	if (!status) {
		if (min_valid)
			pf->min_bw = min_bw;
		if (max_valid)
			pf->max_bw = max_bw;
	}

	return status;
}
12545 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
12546 * @pf: board private structure
12548 int i40e_set_partition_bw_setting(struct i40e_pf *pf)
12550 struct i40e_aqc_configure_partition_bw_data bw_data;
	int status;

	memset(&bw_data, 0, sizeof(bw_data));
12555 /* Set the valid bit for this PF */
12556 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
12557 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
12558 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
12560 /* Set the new bandwidths */
	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);

	return status;
}
12567 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
12568 * @pf: board private structure
12570 int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
12572 /* Commit temporary BW setting to permanent NVM image */
	enum i40e_admin_queue_err last_aq_status;
	u16 nvm_word;
	int ret;

	if (pf->hw.partition_id != 1) {
		dev_info(&pf->pdev->dev,
			 "Commit BW only works on partition 1! This is partition %d",
			 pf->hw.partition_id);
		ret = -EOPNOTSUPP;
		goto bw_commit_out;
	}
12585 /* Acquire NVM for read access */
12586 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for read access, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}
12596 /* Read word 0x10 of NVM - SW compatibility word 1 */
12597 ret = i40e_aq_read_nvm(&pf->hw,
12598 I40E_SR_NVM_CONTROL_WORD,
			       0x10, sizeof(nvm_word), &nvm_word,
			       false, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}
	/* Wait a bit for NVM release to complete */
	msleep(50);
12616 /* Acquire NVM for write access */
12617 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for write access, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}
12626 /* Write it back out unchanged to initiate update NVM,
12627 * which will force a write of the shadow (alt) RAM to
12628 * the NVM - thus storing the bandwidth values permanently.
	 */
	ret = i40e_aq_update_nvm(&pf->hw,
12631 I40E_SR_NVM_CONTROL_WORD,
12632 0x10, sizeof(nvm_word),
12633 &nvm_word, true, 0, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "BW settings NOT SAVED, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:

	return ret;
}
12650 * i40e_is_total_port_shutdown_enabled - read NVM and return value
12651 * if total port shutdown feature is enabled for this PF
12652 * @pf: board private structure
12654 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
12656 #define I40E_TOTAL_PORT_SHUTDOWN_ENABLED BIT(4)
12657 #define I40E_FEATURES_ENABLE_PTR 0x2A
12658 #define I40E_CURRENT_SETTING_PTR 0x2B
12659 #define I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D
12660 #define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
12661 #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
12662 #define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
12663 u16 sr_emp_sr_settings_ptr = 0;
12664 u16 features_enable = 0;
12665 u16 link_behavior = 0;
	int read_status = 0;
	bool ret = false;
12669 read_status = i40e_read_nvm_word(&pf->hw,
12670 I40E_SR_EMP_SR_SETTINGS_PTR,
					 &sr_emp_sr_settings_ptr);
	if (read_status)
		goto err_nvm;
12674 read_status = i40e_read_nvm_word(&pf->hw,
12675 sr_emp_sr_settings_ptr +
					 I40E_FEATURES_ENABLE_PTR,
					 &features_enable);
	if (read_status)
		goto err_nvm;
12680 if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
12681 read_status = i40e_read_nvm_module_data(&pf->hw,
12682 I40E_SR_EMP_SR_SETTINGS_PTR,
12683 I40E_CURRENT_SETTING_PTR,
12684 I40E_LINK_BEHAVIOR_WORD_OFFSET,
							I40E_LINK_BEHAVIOR_WORD_LENGTH,
							&link_behavior);
		if (read_status)
			goto err_nvm;
12689 link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
		ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
	}

	return ret;

err_nvm:
	dev_warn(&pf->pdev->dev,
		 "total-port-shutdown feature is off due to read nvm error: %pe\n",
		 ERR_PTR(read_status));
	return ret;
}
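/* Illustrative sketch (compiled out, hypothetical helper): each port owns a
 * 4-bit slice of the NVM link-behavior word, so port N's settings are
 * recovered by shifting right by N * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH and
 * testing the OS-forced bit, exactly as done above.
 */
#ifdef I40E_DOC_EXAMPLES
static bool i40e_example_port_os_forced(u16 link_behavior, u8 port)
{
	link_behavior >>= port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH;
	return link_behavior & I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED;
}
#endif /* I40E_DOC_EXAMPLES */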
12702 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
12703 * @pf: board private structure to initialize
12705 * i40e_sw_init initializes the Adapter private data structure.
12706 * Fields are initialized based on PCI device information and
12707 * OS network device settings (MTU size).
12709 static int i40e_sw_init(struct i40e_pf *pf)
	int err = 0;
	int size;
	u16 pow;

	/* Set default capability flags */
12716 bitmap_zero(pf->flags, I40E_PF_FLAGS_NBITS);
12717 set_bit(I40E_FLAG_MSI_ENA, pf->flags);
12718 set_bit(I40E_FLAG_MSIX_ENA, pf->flags);
12720 /* Set default ITR */
12721 pf->rx_itr_default = I40E_ITR_RX_DEF;
12722 pf->tx_itr_default = I40E_ITR_TX_DEF;
12724 /* Depending on PF configurations, it is possible that the RSS
12725 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
12728 pf->alloc_rss_size = 1;
12729 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
12730 pf->rss_size_max = min_t(int, pf->rss_size_max,
12731 pf->hw.func_caps.num_tx_qp);
12733 /* find the next higher power-of-2 of num cpus */
12734 pow = roundup_pow_of_two(num_online_cpus());
12735 pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
12737 if (pf->hw.func_caps.rss) {
12738 set_bit(I40E_FLAG_RSS_ENA, pf->flags);
12739 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
					   num_online_cpus());
	}
12743 /* MFP mode enabled */
12744 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
12745 set_bit(I40E_FLAG_MFP_ENA, pf->flags);
12746 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
12747 if (i40e_get_partition_bw_setting(pf)) {
12748 dev_warn(&pf->pdev->dev,
12749 "Could not get partition bw settings\n");
12751 dev_info(&pf->pdev->dev,
12752 "Partition BW Min = %8.8x, Max = %8.8x\n",
12753 pf->min_bw, pf->max_bw);
12755 /* nudge the Tx scheduler */
			i40e_set_partition_bw_setting(pf);
		}
	}
12760 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
12761 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
12762 set_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
12763 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) &&
12764 pf->hw.num_partitions > 1)
12765 dev_info(&pf->pdev->dev,
12766 "Flow Director Sideband mode Disabled in MFP mode\n");
12768 set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
12769 pf->fdir_pf_filter_count =
12770 pf->hw.func_caps.fd_filters_guaranteed;
12771 pf->hw.fdir_shared_filter_count =
			pf->hw.func_caps.fd_filters_best_effort;
	}
12775 /* Enable HW ATR eviction if possible */
12776 if (test_bit(I40E_HW_CAP_ATR_EVICT, pf->hw.caps))
12777 set_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags);
12779 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
12780 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
12781 set_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
12782 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
12783 }
12785 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
12786 set_bit(I40E_FLAG_IWARP_ENA, pf->flags);
12787 /* IWARP needs one extra vector for CQP just like MISC.*/
12788 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
12789 }
12790 /* Stopping FW LLDP engine is supported on XL710 and X722
12791 * starting from FW versions determined in i40e_init_adminq.
12792 * Stopping the FW LLDP engine is not supported on XL710
12793 * if NPAR is functioning so unset this hw flag in this case.
12794 */
12795 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12796 pf->hw.func_caps.npar_enable)
12797 clear_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, pf->hw.caps);
12799 #ifdef CONFIG_PCI_IOV
12800 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
12801 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
12802 set_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
12803 pf->num_req_vfs = min_t(int,
12804 pf->hw.func_caps.num_vfs,
12805 I40E_MAX_VF_COUNT);
12806 }
12807 #endif /* CONFIG_PCI_IOV */
12808 pf->lan_veb = I40E_NO_VEB;
12809 pf->lan_vsi = I40E_NO_VSI;
12811 /* By default FW has this off for performance reasons */
12812 clear_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
12814 /* set up queue assignment tracking */
12815 size = sizeof(struct i40e_lump_tracking)
12816 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
12817 pf->qp_pile = kzalloc(size, GFP_KERNEL);
12818 if (!pf->qp_pile) {
12819 err = -ENOMEM;
12820 goto sw_init_done;
12821 }
12822 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
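/* The pile is a single variable-length allocation: one tracking header
 * plus one u16 slot per queue pair (num_tx_qp = 128 would add 128 u16s),
 * and i40e_get_lump()/i40e_put_lump() later carve contiguous queue ranges
 * out of it for each VSI.
 */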
12824 pf->tx_timeout_recovery_level = 1;
12826 if (pf->hw.mac.type != I40E_MAC_X722 &&
12827 i40e_is_total_port_shutdown_enabled(pf)) {
12828 /* Link down on close must be on when total port shutdown
12829 * is enabled for a given port
12830 */
12831 set_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
12832 set_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
12833 dev_info(&pf->pdev->dev,
12834 "total-port-shutdown was enabled, link-down-on-close is forced on\n");
12836 mutex_init(&pf->switch_mutex);
12843 * i40e_set_ntuple - set the ntuple feature flag and take action
12844 * @pf: board private structure to initialize
12845 * @features: the feature set that the stack is suggesting
12847 * returns a bool to indicate if reset needs to happen
12849 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
12851 bool need_reset = false;
12853 /* Check if Flow Director n-tuple support was enabled or disabled. If
12854 * the state changed, we need to reset.
12855 */
12856 if (features & NETIF_F_NTUPLE) {
12857 /* Enable filters and mark for reset */
12858 if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
12859 need_reset = true;
12860 /* enable FD_SB only if there is MSI-X vector and no cloud
12861 * filters exist
12862 */
12863 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
12864 set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
12865 clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
12866 }
12867 } else {
12868 /* turn off filters, mark for reset and clear SW filter list */
12869 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
12870 need_reset = true;
12871 i40e_fdir_filter_exit(pf);
12872 }
12873 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
12874 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
12875 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
12876 }
12877 /* reset fd counters */
12878 pf->fd_add_err = 0;
12879 pf->fd_atr_cnt = 0;
12880 /* if ATR was auto disabled it can be re-enabled. */
12881 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
12882 if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
12883 (I40E_DEBUG_FD & pf->hw.debug_mask))
12884 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
12885 return need_reset;
12886 }
12890 * i40e_clear_rss_lut - clear the rx hash lookup table
12891 * @vsi: the VSI being configured
12893 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
12895 struct i40e_pf *pf = vsi->back;
12896 struct i40e_hw *hw = &pf->hw;
12897 u16 vf_id = vsi->vf_id;
12898 u8 i;
12900 if (vsi->type == I40E_VSI_MAIN) {
12901 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12902 wr32(hw, I40E_PFQF_HLUT(i), 0);
12903 } else if (vsi->type == I40E_VSI_SRIOV) {
12904 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12905 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
12906 } else {
12907 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12908 }
12909 }
12912 * i40e_set_loopback - turn on/off loopback mode on underlying PF
12914 * @ena: flag to indicate the on/off setting
12916 static int i40e_set_loopback(struct i40e_vsi *vsi, bool ena)
12918 bool if_running = netif_running(vsi->netdev) &&
12919 !test_and_set_bit(__I40E_VSI_DOWN, vsi->state);
12920 int ret;
12922 if (if_running)
12923 i40e_down(vsi);
12925 ret = i40e_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
12926 if (ret)
12927 netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
12928 if (if_running)
12929 i40e_up(vsi);
12931 return ret;
12932 }
12935 * i40e_set_features - set the netdev feature flags
12936 * @netdev: ptr to the netdev being adjusted
12937 * @features: the feature set that the stack is suggesting
12938 * Note: expects to be called while under rtnl_lock()
12940 static int i40e_set_features(struct net_device *netdev,
12941 netdev_features_t features)
12943 struct i40e_netdev_priv *np = netdev_priv(netdev);
12944 struct i40e_vsi *vsi = np->vsi;
12945 struct i40e_pf *pf = vsi->back;
12946 bool need_reset;
12948 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
12949 i40e_pf_config_rss(pf);
12950 else if (!(features & NETIF_F_RXHASH) &&
12951 netdev->features & NETIF_F_RXHASH)
12952 i40e_clear_rss_lut(vsi);
12954 if (features & NETIF_F_HW_VLAN_CTAG_RX)
12955 i40e_vlan_stripping_enable(vsi);
12956 else
12957 i40e_vlan_stripping_disable(vsi);
12959 if (!(features & NETIF_F_HW_TC) &&
12960 (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
12961 dev_err(&pf->pdev->dev,
12962 "Offloaded tc filters active, can't turn hw_tc_offload off");
12966 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
12967 i40e_del_all_macvlans(vsi);
12969 need_reset = i40e_set_ntuple(pf, features);
12971 if (need_reset)
12972 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12974 if ((features ^ netdev->features) & NETIF_F_LOOPBACK)
12975 return i40e_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
12977 return 0;
12978 }
12980 static int i40e_udp_tunnel_set_port(struct net_device *netdev,
12981 unsigned int table, unsigned int idx,
12982 struct udp_tunnel_info *ti)
12984 struct i40e_netdev_priv *np = netdev_priv(netdev);
12985 struct i40e_hw *hw = &np->vsi->back->hw;
12986 u8 type, filter_index;
12989 type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
12990 I40E_AQC_TUNNEL_TYPE_NGE;
12992 ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
12993 NULL);
12994 if (ret) {
12995 netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n",
12996 ERR_PTR(ret),
12997 i40e_aq_str(hw, hw->aq.asq_last_status));
12998 return -EIO;
12999 }
13001 udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
13002 return 0;
13003 }
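/* The filter_index stashed via udp_tunnel_nic_set_port_priv() comes back
 * as ti->hw_priv in the unset callback below, which lets the delete path
 * name the same hardware filter slot without any extra driver bookkeeping.
 */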
13005 static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
13006 unsigned int table, unsigned int idx,
13007 struct udp_tunnel_info *ti)
13009 struct i40e_netdev_priv *np = netdev_priv(netdev);
13010 struct i40e_hw *hw = &np->vsi->back->hw;
13013 ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
13014 if (ret) {
13015 netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n",
13016 ERR_PTR(ret),
13017 i40e_aq_str(hw, hw->aq.asq_last_status));
13018 return -EIO;
13019 }
13021 return 0;
13022 }
13024 static int i40e_get_phys_port_id(struct net_device *netdev,
13025 struct netdev_phys_item_id *ppid)
13027 struct i40e_netdev_priv *np = netdev_priv(netdev);
13028 struct i40e_pf *pf = np->vsi->back;
13029 struct i40e_hw *hw = &pf->hw;
13031 if (!test_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps))
13032 return -EOPNOTSUPP;
13034 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
13035 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
13037 return 0;
13038 }
13041 * i40e_ndo_fdb_add - add an entry to the hardware database
13042 * @ndm: the input from the stack
13043 * @tb: pointer to array of nladdr (unused)
13044 * @dev: the net device pointer
13045 * @addr: the MAC address entry being added
13047 * @flags: instructions from stack about fdb operation
13048 * @extack: netlink extended ack, unused currently
13050 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
13051 struct net_device *dev,
13052 const unsigned char *addr, u16 vid,
13053 u16 flags,
13054 struct netlink_ext_ack *extack)
13056 struct i40e_netdev_priv *np = netdev_priv(dev);
13057 struct i40e_pf *pf = np->vsi->back;
13060 if (!test_bit(I40E_FLAG_SRIOV_ENA, pf->flags))
13061 return -EOPNOTSUPP;
13063 if (vid) {
13064 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
13065 return -EINVAL;
13066 }
13068 /* Hardware does not support aging addresses so if a
13069 * ndm_state is given only allow permanent addresses
13071 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
13072 netdev_info(dev, "FDB only supports static addresses\n");
13076 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
13077 err = dev_uc_add_excl(dev, addr);
13078 else if (is_multicast_ether_addr(addr))
13079 err = dev_mc_add_excl(dev, addr);
13080 else
13081 err = -EINVAL;
13083 /* Only return duplicate errors if NLM_F_EXCL is set */
13084 if (err == -EEXIST && !(flags & NLM_F_EXCL))
13085 err = 0;
13087 return err;
13088 }
13091 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
13092 * @dev: the netdev being configured
13093 * @nlh: RTNL message
13094 * @flags: bridge flags
13095 * @extack: netlink extended ack
13097 * Inserts a new hardware bridge if not already created and
13098 * enables the bridging mode requested (VEB or VEPA). If the
13099 * hardware bridge has already been inserted and the request
13100 * is to change the mode then that requires a PF reset to
13101 * allow rebuild of the components with required hardware
13102 * bridge mode enabled.
13104 * Note: expects to be called while under rtnl_lock()
13106 static int i40e_ndo_bridge_setlink(struct net_device *dev,
13107 struct nlmsghdr *nlh,
13109 struct netlink_ext_ack *extack)
13111 struct i40e_netdev_priv *np = netdev_priv(dev);
13112 struct i40e_vsi *vsi = np->vsi;
13113 struct i40e_pf *pf = vsi->back;
13114 struct i40e_veb *veb = NULL;
13115 struct nlattr *attr, *br_spec;
13118 /* Only for PF VSI for now */
13119 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
13120 return -EOPNOTSUPP;
13122 /* Find the HW bridge for PF VSI */
13123 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
13124 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
13125 veb = pf->veb[i];
13126 }
13128 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
13129 if (!br_spec)
13130 return -EINVAL;
13132 nla_for_each_nested(attr, br_spec, rem) {
13133 __u16 mode;
13135 if (nla_type(attr) != IFLA_BRIDGE_MODE)
13136 continue;
13138 mode = nla_get_u16(attr);
13139 if ((mode != BRIDGE_MODE_VEPA) &&
13140 (mode != BRIDGE_MODE_VEB))
13141 return -EINVAL;
13143 /* Insert a new HW bridge */
13144 if (!veb) {
13145 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
13146 vsi->tc_config.enabled_tc);
13147 if (veb) {
13148 veb->bridge_mode = mode;
13149 i40e_config_bridge_mode(veb);
13150 } else {
13151 /* No Bridge HW offload available */
13152 return -ENOENT;
13153 }
13154 break;
13155 } else if (mode != veb->bridge_mode) {
13156 /* Existing HW bridge but different mode needs reset */
13157 veb->bridge_mode = mode;
13158 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
13159 if (mode == BRIDGE_MODE_VEB)
13160 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
13161 else
13162 clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
13163 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
13164 break;
13165 }
13166 }
13168 return 0;
13169 }
13172 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
13173 * @skb: skb buff
13174 * @pid: process id
13175 * @seq: RTNL message seq #
13176 * @dev: the netdev being configured
13177 * @filter_mask: unused
13178 * @nlflags: netlink flags passed in
13180 * Returns the mode (VEB or VEPA) in which the hardware bridge is operating.
13183 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
13184 struct net_device *dev,
13185 u32 __always_unused filter_mask,
13186 int nlflags)
13187 {
13188 struct i40e_netdev_priv *np = netdev_priv(dev);
13189 struct i40e_vsi *vsi = np->vsi;
13190 struct i40e_pf *pf = vsi->back;
13191 struct i40e_veb *veb = NULL;
13194 /* Only for PF VSI for now */
13195 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
13196 return -EOPNOTSUPP;
13198 /* Find the HW bridge for the PF VSI */
13199 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
13200 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
13201 veb = pf->veb[i];
13202 }
13204 if (!veb)
13205 return 0;
13207 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
13208 0, 0, nlflags, filter_mask, NULL);
13209 }
13212 * i40e_features_check - Validate encapsulated packet conforms to limits
13213 * @skb: skb buff
13214 * @dev: This physical port's netdev
13215 * @features: Offload features that the stack believes apply
13217 static netdev_features_t i40e_features_check(struct sk_buff *skb,
13218 struct net_device *dev,
13219 netdev_features_t features)
13223 /* No point in doing any of this if neither checksum nor GSO are
13224 * being requested for this frame. We can rule out both by just
13225 * checking for CHECKSUM_PARTIAL
13226 */
13227 if (skb->ip_summed != CHECKSUM_PARTIAL)
13228 return features;
13230 /* We cannot support GSO if the MSS is going to be less than
13231 * 64 bytes. If it is then we need to drop support for GSO.
13232 */
13233 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
13234 features &= ~NETIF_F_GSO_MASK;
13236 /* MACLEN can support at most 63 words */
13237 len = skb_network_header(skb) - skb->data;
13238 if (len & ~(63 * 2))
13239 goto out_err;
13241 /* IPLEN and EIPLEN can support at most 127 dwords */
13242 len = skb_transport_header(skb) - skb_network_header(skb);
13243 if (len & ~(127 * 4))
13244 goto out_err;
13246 if (skb->encapsulation) {
13247 /* L4TUNLEN can support 127 words */
13248 len = skb_inner_network_header(skb) - skb_transport_header(skb);
13249 if (len & ~(127 * 2))
13250 goto out_err;
13252 /* IPLEN can support at most 127 dwords */
13253 len = skb_inner_transport_header(skb) -
13254 skb_inner_network_header(skb);
13255 if (len & ~(127 * 4))
13256 goto out_err;
13257 }
13259 /* No need to validate L4LEN as TCP is the only protocol with a
13260 * flexible value and we support all possible values supported
13261 * by TCP, which is at most 15 dwords
13262 */
13263 return features;
13265 out_err:
13266 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
13267 }
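/* The length masks above are bit tricks; for MACLEN, for example,
 * len & ~(63 * 2) is len & ~126, which is nonzero whenever len is odd or
 * len >= 128, so only even MAC header offsets up to 126 bytes (63 words)
 * pass. The 127-dword checks work the same way on 4-byte units.
 */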
13270 * i40e_xdp_setup - add/remove an XDP program
13271 * @vsi: VSI to changed
13272 * @prog: XDP program
13273 * @extack: netlink extended ack
13275 static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
13276 struct netlink_ext_ack *extack)
13278 int frame_size = i40e_max_vsi_frame_size(vsi, prog);
13279 struct i40e_pf *pf = vsi->back;
13280 struct bpf_prog *old_prog;
13281 bool need_reset;
13282 int i;
13284 /* Don't allow frames that span over multiple buffers */
13285 if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) {
13286 NL_SET_ERR_MSG_MOD(extack, "MTU too large for linear frames and XDP prog does not support frags");
13287 return -EOPNOTSUPP;
13288 }
13290 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
13291 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
13293 if (need_reset)
13294 i40e_prep_for_reset(pf);
13296 /* VSI shall be deleted in a moment, just return EINVAL */
13297 if (test_bit(__I40E_IN_REMOVE, pf->state))
13298 return -EINVAL;
13300 old_prog = xchg(&vsi->xdp_prog, prog);
13302 if (need_reset) {
13303 if (!prog) {
13304 xdp_features_clear_redirect_target(vsi->netdev);
13305 /* Wait until ndo_xsk_wakeup completes. */
13306 synchronize_rcu();
13307 }
13308 i40e_reset_and_rebuild(pf, true, true);
13309 }
13311 if (!i40e_enabled_xdp_vsi(vsi) && prog) {
13312 if (i40e_realloc_rx_bi_zc(vsi, true))
13313 return -ENOMEM;
13314 } else if (i40e_enabled_xdp_vsi(vsi) && !prog) {
13315 if (i40e_realloc_rx_bi_zc(vsi, false))
13316 return -ENOMEM;
13317 }
13319 for (i = 0; i < vsi->num_queue_pairs; i++)
13320 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
13322 if (old_prog)
13323 bpf_prog_put(old_prog);
13325 /* Kick start the NAPI context if there is an AF_XDP socket open
13326 * on that queue id, so that receiving will start.
13327 */
13328 if (need_reset && prog) {
13329 for (i = 0; i < vsi->num_queue_pairs; i++)
13330 if (vsi->xdp_rings[i]->xsk_pool)
13331 (void)i40e_xsk_wakeup(vsi->netdev, i,
13332 XDP_WAKEUP_RX);
13333 xdp_features_set_redirect_target(vsi->netdev, true);
13334 }
13336 return 0;
13337 }
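/* Flow summary for the setup above: attaching or detaching a program
 * flips i40e_enabled_xdp_vsi() and therefore forces a prep/reset/rebuild
 * of the rings, while swapping one program for another is just the xchg()
 * on vsi->xdp_prog plus the per-ring WRITE_ONCE(), with no disruption.
 */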
13340 * i40e_enter_busy_conf - Enters busy config state
13343 * Returns 0 on success, <0 for failure.
13345 static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
13347 struct i40e_pf *pf = vsi->back;
13348 int timeout = 50;
13350 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
13351 timeout--;
13352 if (!timeout)
13353 return -EBUSY;
13354 usleep_range(1000, 2000);
13355 }
13357 return 0;
13358 }
13361 * i40e_exit_busy_conf - Exits busy config state
13364 static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
13366 struct i40e_pf *pf = vsi->back;
13368 clear_bit(__I40E_CONFIG_BUSY, pf->state);
13372 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
13374 * @queue_pair: queue pair
13376 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
13378 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
13379 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
13380 memset(&vsi->tx_rings[queue_pair]->stats, 0,
13381 sizeof(vsi->tx_rings[queue_pair]->stats));
13382 if (i40e_enabled_xdp_vsi(vsi)) {
13383 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
13384 sizeof(vsi->xdp_rings[queue_pair]->stats));
13385 }
13386 }
13389 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
13391 * @queue_pair: queue pair
13393 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
13395 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
13396 if (i40e_enabled_xdp_vsi(vsi)) {
13397 /* Make sure that in-progress ndo_xdp_xmit calls are
13398 * completed.
13399 */
13400 synchronize_rcu();
13401 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
13402 }
13403 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
13404 }
13407 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
13409 * @queue_pair: queue pair
13410 * @enable: true for enable, false for disable
13412 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
13415 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13416 struct i40e_q_vector *q_vector = rxr->q_vector;
13418 if (!vsi->netdev)
13419 return;
13421 /* All rings in a qp belong to the same qvector. */
13422 if (q_vector->rx.ring || q_vector->tx.ring) {
13423 if (enable)
13424 napi_enable(&q_vector->napi);
13425 else
13426 napi_disable(&q_vector->napi);
13427 }
13428 }
13431 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
13433 * @queue_pair: queue pair
13434 * @enable: true for enable, false for disable
13436 * Returns 0 on success, <0 on failure.
13438 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
13441 struct i40e_pf *pf = vsi->back;
13442 int pf_q, ret = 0;
13444 pf_q = vsi->base_queue + queue_pair;
13445 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
13446 false /*is xdp*/, enable);
13447 if (ret) {
13448 dev_info(&pf->pdev->dev,
13449 "VSI seid %d Tx ring %d %sable timeout\n",
13450 vsi->seid, pf_q, (enable ? "en" : "dis"));
13451 return ret;
13452 }
13454 i40e_control_rx_q(pf, pf_q, enable);
13455 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
13456 if (ret) {
13457 dev_info(&pf->pdev->dev,
13458 "VSI seid %d Rx ring %d %sable timeout\n",
13459 vsi->seid, pf_q, (enable ? "en" : "dis"));
13460 return ret;
13461 }
13463 /* Due to HW errata, on Rx disable only, the register can
13464 * indicate done before it really is. Needs 50ms to be sure
13465 */
13466 if (!enable)
13467 mdelay(50);
13469 if (!i40e_enabled_xdp_vsi(vsi))
13470 return ret;
13472 ret = i40e_control_wait_tx_q(vsi->seid, pf,
13473 pf_q + vsi->alloc_queue_pairs,
13474 true /*is xdp*/, enable);
13475 if (ret)
13476 dev_info(&pf->pdev->dev,
13477 "VSI seid %d XDP Tx ring %d %sable timeout\n",
13478 vsi->seid, pf_q, (enable ? "en" : "dis"));
13480 return ret;
13481 }
13485 * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
13487 * @queue_pair: queue_pair
13489 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
13491 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13492 struct i40e_pf *pf = vsi->back;
13493 struct i40e_hw *hw = &pf->hw;
13495 /* All rings in a qp belong to the same qvector. */
13496 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
13497 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
13498 else
13499 i40e_irq_dynamic_enable_icr0(pf);
13501 i40e_flush(hw);
13502 }
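/* In MSI-X mode each queue pair owns its own vector, so only that vector
 * is re-armed here; with legacy INTx or MSI everything is multiplexed
 * onto ICR0, which is why the fallback path re-enables ICR0 instead.
 */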
13505 * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
13507 * @queue_pair: queue_pair
13509 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
13511 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13512 struct i40e_pf *pf = vsi->back;
13513 struct i40e_hw *hw = &pf->hw;
13515 /* For simplicity, instead of removing the qp interrupt causes
13516 * from the interrupt linked list, we simply disable the interrupt, and
13517 * leave the list intact.
13519 * All rings in a qp belong to the same qvector.
13520 */
13521 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
13522 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
13524 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
13525 i40e_flush(hw);
13526 synchronize_irq(pf->msix_entries[intpf].vector);
13527 } else {
13528 /* Legacy and MSI mode - this stops all interrupt handling */
13529 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
13530 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
13531 i40e_flush(hw);
13532 synchronize_irq(pf->pdev->irq);
13533 }
13534 }
13537 * i40e_queue_pair_disable - Disables a queue pair
13539 * @queue_pair: queue pair
13541 * Returns 0 on success, <0 on failure.
13543 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
13547 err = i40e_enter_busy_conf(vsi);
13548 if (err)
13549 return err;
13551 i40e_queue_pair_disable_irq(vsi, queue_pair);
13552 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
13553 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
13554 i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
13555 i40e_queue_pair_clean_rings(vsi, queue_pair);
13556 i40e_queue_pair_reset_stats(vsi, queue_pair);
13558 return err;
13559 }
13562 * i40e_queue_pair_enable - Enables a queue pair
13564 * @queue_pair: queue pair
13566 * Returns 0 on success, <0 on failure.
13568 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
13572 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
13573 if (err)
13574 return err;
13576 if (i40e_enabled_xdp_vsi(vsi)) {
13577 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
13578 if (err)
13579 return err;
13580 }
13582 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
13583 if (err)
13584 return err;
13586 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
13587 i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
13588 i40e_queue_pair_enable_irq(vsi, queue_pair);
13590 i40e_exit_busy_conf(vsi);
13592 return err;
13593 }
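/* These enable/disable helpers let a single queue pair be torn down and
 * brought back without touching the rest of the VSI; the AF_XDP
 * zero-copy pool setup path (i40e_xsk_pool_setup) relies on exactly this
 * when a socket binds to or leaves one queue id.
 */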
13596 * i40e_xdp - implements ndo_bpf for i40e
13598 * @xdp: XDP command
13600 static int i40e_xdp(struct net_device *dev,
13601 struct netdev_bpf *xdp)
13603 struct i40e_netdev_priv *np = netdev_priv(dev);
13604 struct i40e_vsi *vsi = np->vsi;
13606 if (vsi->type != I40E_VSI_MAIN)
13607 return -EINVAL;
13609 switch (xdp->command) {
13610 case XDP_SETUP_PROG:
13611 return i40e_xdp_setup(vsi, xdp->prog, xdp->extack);
13612 case XDP_SETUP_XSK_POOL:
13613 return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
13614 xdp->xsk.queue_id);
13615 default:
13616 return -EINVAL;
13617 }
13618 }
13620 static const struct net_device_ops i40e_netdev_ops = {
13621 .ndo_open = i40e_open,
13622 .ndo_stop = i40e_close,
13623 .ndo_start_xmit = i40e_lan_xmit_frame,
13624 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
13625 .ndo_set_rx_mode = i40e_set_rx_mode,
13626 .ndo_validate_addr = eth_validate_addr,
13627 .ndo_set_mac_address = i40e_set_mac,
13628 .ndo_change_mtu = i40e_change_mtu,
13629 .ndo_eth_ioctl = i40e_ioctl,
13630 .ndo_tx_timeout = i40e_tx_timeout,
13631 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
13632 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
13633 #ifdef CONFIG_NET_POLL_CONTROLLER
13634 .ndo_poll_controller = i40e_netpoll,
13635 #endif
13636 .ndo_setup_tc = __i40e_setup_tc,
13637 .ndo_select_queue = i40e_lan_select_queue,
13638 .ndo_set_features = i40e_set_features,
13639 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
13640 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
13641 .ndo_get_vf_stats = i40e_get_vf_stats,
13642 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
13643 .ndo_get_vf_config = i40e_ndo_get_vf_config,
13644 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
13645 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
13646 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
13647 .ndo_get_phys_port_id = i40e_get_phys_port_id,
13648 .ndo_fdb_add = i40e_ndo_fdb_add,
13649 .ndo_features_check = i40e_features_check,
13650 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
13651 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
13652 .ndo_bpf = i40e_xdp,
13653 .ndo_xdp_xmit = i40e_xdp_xmit,
13654 .ndo_xsk_wakeup = i40e_xsk_wakeup,
13655 .ndo_dfwd_add_station = i40e_fwd_add,
13656 .ndo_dfwd_del_station = i40e_fwd_del,
13657 };
13660 * i40e_config_netdev - Setup the netdev flags
13661 * @vsi: the VSI being configured
13663 * Returns 0 on success, negative value on failure
13665 static int i40e_config_netdev(struct i40e_vsi *vsi)
13667 struct i40e_pf *pf = vsi->back;
13668 struct i40e_hw *hw = &pf->hw;
13669 struct i40e_netdev_priv *np;
13670 struct net_device *netdev;
13671 u8 broadcast[ETH_ALEN];
13672 u8 mac_addr[ETH_ALEN];
13673 int etherdev_size;
13674 netdev_features_t hw_enc_features;
13675 netdev_features_t hw_features;
13677 etherdev_size = sizeof(struct i40e_netdev_priv);
13678 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
13679 if (!netdev)
13680 return -ENOMEM;
13682 vsi->netdev = netdev;
13683 np = netdev_priv(netdev);
13684 np->vsi = vsi;
13686 hw_enc_features = NETIF_F_SG |
13687 NETIF_F_HW_CSUM |
13688 NETIF_F_HIGHDMA |
13689 NETIF_F_SOFT_FEATURES |
13690 NETIF_F_TSO |
13691 NETIF_F_TSO_ECN |
13692 NETIF_F_TSO6 |
13693 NETIF_F_GSO_GRE |
13694 NETIF_F_GSO_GRE_CSUM |
13695 NETIF_F_GSO_PARTIAL |
13696 NETIF_F_GSO_IPXIP4 |
13697 NETIF_F_GSO_IPXIP6 |
13698 NETIF_F_GSO_UDP_TUNNEL |
13699 NETIF_F_GSO_UDP_TUNNEL_CSUM |
13700 NETIF_F_GSO_UDP_L4 |
13701 NETIF_F_SCTP_CRC |
13702 NETIF_F_RXHASH |
13703 NETIF_F_RXCSUM |
13704 0;
13706 if (!test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps))
13707 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
13709 netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
13711 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
13713 netdev->hw_enc_features |= hw_enc_features;
13715 /* record features VLANs can make use of */
13716 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
13718 #define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
13719 NETIF_F_GSO_GRE_CSUM | \
13720 NETIF_F_GSO_IPXIP4 | \
13721 NETIF_F_GSO_IPXIP6 | \
13722 NETIF_F_GSO_UDP_TUNNEL | \
13723 NETIF_F_GSO_UDP_TUNNEL_CSUM)
13725 netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES;
13726 netdev->features |= NETIF_F_GSO_PARTIAL |
13727 I40E_GSO_PARTIAL_FEATURES;
13729 netdev->mpls_features |= NETIF_F_SG;
13730 netdev->mpls_features |= NETIF_F_HW_CSUM;
13731 netdev->mpls_features |= NETIF_F_TSO;
13732 netdev->mpls_features |= NETIF_F_TSO6;
13733 netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES;
13735 /* enable macvlan offloads */
13736 netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
13738 hw_features = hw_enc_features |
13739 NETIF_F_HW_VLAN_CTAG_TX |
13740 NETIF_F_HW_VLAN_CTAG_RX;
13742 if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
13743 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
13745 netdev->hw_features |= hw_features | NETIF_F_LOOPBACK;
13747 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
13748 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
13750 netdev->features &= ~NETIF_F_HW_TC;
13752 if (vsi->type == I40E_VSI_MAIN) {
13753 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
13754 ether_addr_copy(mac_addr, hw->mac.perm_addr);
13755 /* The following steps are necessary for two reasons. First,
13756 * some older NVM configurations load a default MAC-VLAN
13757 * filter that will accept any tagged packet, and we want to
13758 * replace this with a normal filter. Additionally, it is
13759 * possible our MAC address was provided by the platform using
13760 * Open Firmware or similar.
13762 * Thus, we need to remove the default filter and install one
13763 * specific to the MAC address.
13764 */
13765 i40e_rm_default_mac_filter(vsi, mac_addr);
13766 spin_lock_bh(&vsi->mac_filter_hash_lock);
13767 i40e_add_mac_filter(vsi, mac_addr);
13768 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13770 netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
13771 NETDEV_XDP_ACT_REDIRECT |
13772 NETDEV_XDP_ACT_XSK_ZEROCOPY |
13773 NETDEV_XDP_ACT_RX_SG;
13774 netdev->xdp_zc_max_segs = I40E_MAX_BUFFER_TXD;
13775 } else {
13776 /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
13777 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
13778 * the end, which is 4 bytes long, so force truncation of the
13779 * original name by IFNAMSIZ - 4
13780 */
13781 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
13782 IFNAMSIZ - 4,
13783 pf->vsi[pf->lan_vsi]->netdev->name);
13784 eth_random_addr(mac_addr);
13786 spin_lock_bh(&vsi->mac_filter_hash_lock);
13787 i40e_add_mac_filter(vsi, mac_addr);
13788 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13789 }
13791 /* Add the broadcast filter so that we initially will receive
13792 * broadcast packets. Note that when a new VLAN is first added the
13793 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
13794 * specific filters as part of transitioning into "vlan" operation.
13795 * When more VLANs are added, the driver will copy each existing MAC
13796 * filter and add it for the new VLAN.
13798 * Broadcast filters are handled specially by
13799 * i40e_sync_filters_subtask, as the driver must set the broadcast
13800 * promiscuous bit instead of adding this directly as a MAC/VLAN
13801 * filter. The subtask will update the correct broadcast promiscuous
13802 * bits as VLANs become active or inactive.
13803 */
13804 eth_broadcast_addr(broadcast);
13805 spin_lock_bh(&vsi->mac_filter_hash_lock);
13806 i40e_add_mac_filter(vsi, broadcast);
13807 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13809 eth_hw_addr_set(netdev, mac_addr);
13810 ether_addr_copy(netdev->perm_addr, mac_addr);
13812 /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
13813 netdev->neigh_priv_len = sizeof(u32) * 4;
13815 netdev->priv_flags |= IFF_UNICAST_FLT;
13816 netdev->priv_flags |= IFF_SUPP_NOFCS;
13817 /* Setup netdev TC information */
13818 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
13820 netdev->netdev_ops = &i40e_netdev_ops;
13821 netdev->watchdog_timeo = 5 * HZ;
13822 i40e_set_ethtool_ops(netdev);
13824 /* MTU range: 68 - 9706 */
13825 netdev->min_mtu = ETH_MIN_MTU;
13826 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
13828 return 0;
13829 }
13832 * i40e_vsi_delete - Delete a VSI from the switch
13833 * @vsi: the VSI being removed
13837 static void i40e_vsi_delete(struct i40e_vsi *vsi)
13839 /* remove default VSI is not allowed */
13840 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
13841 return;
13843 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
13847 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
13848 * @vsi: the VSI being queried
13850 * Returns 1 if HW bridge mode is VEB and 0 in case of VEPA mode
13852 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
13854 struct i40e_veb *veb;
13855 struct i40e_pf *pf = vsi->back;
13857 /* Uplink is not a bridge so default to VEB */
13858 if (vsi->veb_idx >= I40E_MAX_VEB)
13859 return 1;
13861 veb = pf->veb[vsi->veb_idx];
13862 if (!veb) {
13863 dev_info(&pf->pdev->dev,
13864 "There is no veb associated with the bridge\n");
13865 return -ENOENT;
13866 }
13868 /* Uplink is a bridge in VEPA mode */
13869 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
13870 return 0;
13871 } else {
13872 /* Uplink is a bridge in VEB mode */
13873 return 1;
13874 }
13876 /* VEPA is now default bridge, so return 0 */
13877 return 0;
13878 }
13881 * i40e_add_vsi - Add a VSI to the switch
13882 * @vsi: the VSI being configured
13884 * This initializes a VSI context depending on the VSI type to be added and
13885 * passes it down to the add_vsi aq command.
13887 static int i40e_add_vsi(struct i40e_vsi *vsi)
13890 struct i40e_pf *pf = vsi->back;
13891 struct i40e_hw *hw = &pf->hw;
13892 struct i40e_vsi_context ctxt;
13893 struct i40e_mac_filter *f;
13894 struct hlist_node *h;
13895 int ret = 0;
13896 int bkt;
13897 u8 enabled_tc = 0x1; /* TC0 enabled */
13898 int f_count = 0;
13900 memset(&ctxt, 0, sizeof(ctxt));
13901 switch (vsi->type) {
13902 case I40E_VSI_MAIN:
13903 /* The PF's main VSI is already setup as part of the
13904 * device initialization, so we'll not bother with
13905 * the add_vsi call, but we will retrieve the current
13908 ctxt.seid = pf->main_vsi_seid;
13909 ctxt.pf_num = pf->hw.pf_id;
13911 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
13912 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13913 if (ret) {
13914 dev_info(&pf->pdev->dev,
13915 "couldn't get PF vsi config, err %pe aq_err %s\n",
13916 ERR_PTR(ret),
13917 i40e_aq_str(&pf->hw,
13918 pf->hw.aq.asq_last_status));
13919 return -ENOENT;
13920 }
13921 vsi->info = ctxt.info;
13922 vsi->info.valid_sections = 0;
13924 vsi->seid = ctxt.seid;
13925 vsi->id = ctxt.vsi_number;
13927 enabled_tc = i40e_pf_get_tc_map(pf);
13929 /* Source pruning is enabled by default, so the flag is
13930 * negative logic - if it's set, we need to fiddle with
13931 * the VSI to disable source pruning.
13933 if (test_bit(I40E_FLAG_SOURCE_PRUNING_DIS, pf->flags)) {
13934 memset(&ctxt, 0, sizeof(ctxt));
13935 ctxt.seid = pf->main_vsi_seid;
13936 ctxt.pf_num = pf->hw.pf_id;
13938 ctxt.info.valid_sections |=
13939 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13940 ctxt.info.switch_id =
13941 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
13942 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13943 if (ret) {
13944 dev_info(&pf->pdev->dev,
13945 "update vsi failed, err %pe aq_err %s\n",
13946 ERR_PTR(ret),
13947 i40e_aq_str(&pf->hw,
13948 pf->hw.aq.asq_last_status));
13949 ret = -ENOENT;
13950 goto err;
13951 }
13952 }
13954 /* MFP mode setup queue map and update VSI */
13955 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) &&
13956 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
13957 memset(&ctxt, 0, sizeof(ctxt));
13958 ctxt.seid = pf->main_vsi_seid;
13959 ctxt.pf_num = pf->hw.pf_id;
13961 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
13962 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13963 if (ret) {
13964 dev_info(&pf->pdev->dev,
13965 "update vsi failed, err %pe aq_err %s\n",
13966 ERR_PTR(ret),
13967 i40e_aq_str(&pf->hw,
13968 pf->hw.aq.asq_last_status));
13969 ret = -ENOENT;
13970 goto err;
13971 }
13972 /* update the local VSI info queue map */
13973 i40e_vsi_update_queue_map(vsi, &ctxt);
13974 vsi->info.valid_sections = 0;
13976 /* Default/Main VSI is only enabled for TC0
13977 * reconfigure it to enable all TCs that are
13978 * available on the port in SFP mode.
13979 * For MFP case the iSCSI PF would use this
13980 * flow to enable LAN+iSCSI TC.
13982 ret = i40e_vsi_config_tc(vsi, enabled_tc);
13983 if (ret) {
13984 /* Single TC condition is not fatal,
13985 * message and continue
13986 */
13987 dev_info(&pf->pdev->dev,
13988 "failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n",
13989 enabled_tc,
13990 ERR_PTR(ret),
13991 i40e_aq_str(&pf->hw,
13992 pf->hw.aq.asq_last_status));
13993 }
13994 break;
13997 case I40E_VSI_FDIR:
13998 ctxt.pf_num = hw->pf_id;
14000 ctxt.uplink_seid = vsi->uplink_seid;
14001 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
14002 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
14003 if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags) &&
14004 (i40e_is_vsi_uplink_mode_veb(vsi))) {
14005 ctxt.info.valid_sections |=
14006 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
14007 ctxt.info.switch_id =
14008 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
14010 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
14011 break;
14013 case I40E_VSI_VMDQ2:
14014 ctxt.pf_num = hw->pf_id;
14016 ctxt.uplink_seid = vsi->uplink_seid;
14017 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
14018 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
14020 /* This VSI is connected to VEB so the switch_id
14021 * should be set to zero by default.
14023 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
14024 ctxt.info.valid_sections |=
14025 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
14026 ctxt.info.switch_id =
14027 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
14030 /* Setup the VSI tx/rx queue map for TC0 only for now */
14031 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
14032 break;
14034 case I40E_VSI_SRIOV:
14035 ctxt.pf_num = hw->pf_id;
14036 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
14037 ctxt.uplink_seid = vsi->uplink_seid;
14038 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
14039 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
14041 /* This VSI is connected to VEB so the switch_id
14042 * should be set to zero by default.
14044 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
14045 ctxt.info.valid_sections |=
14046 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
14047 ctxt.info.switch_id =
14048 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
14051 if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) {
14052 ctxt.info.valid_sections |=
14053 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
14054 ctxt.info.queueing_opt_flags |=
14055 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
14056 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
14057 }
14059 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
14060 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
14061 if (pf->vf[vsi->vf_id].spoofchk) {
14062 ctxt.info.valid_sections |=
14063 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
14064 ctxt.info.sec_flags |=
14065 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
14066 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
14067 }
14068 /* Setup the VSI tx/rx queue map for TC0 only for now */
14069 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
14070 break;
14072 case I40E_VSI_IWARP:
14073 /* send down message to iWARP */
14074 break;
14075 default:
14076 return -ENODEV;
14077 }
14080 if (vsi->type != I40E_VSI_MAIN) {
14081 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
14082 if (ret) {
14083 dev_info(&vsi->back->pdev->dev,
14084 "add vsi failed, err %pe aq_err %s\n",
14085 ERR_PTR(ret),
14086 i40e_aq_str(&pf->hw,
14087 pf->hw.aq.asq_last_status));
14088 ret = -ENOENT;
14089 goto err;
14090 }
14091 vsi->info = ctxt.info;
14092 vsi->info.valid_sections = 0;
14093 vsi->seid = ctxt.seid;
14094 vsi->id = ctxt.vsi_number;
14095 }
14097 spin_lock_bh(&vsi->mac_filter_hash_lock);
14098 vsi->active_filters = 0;
14099 /* If macvlan filters already exist, force them to get loaded */
14100 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
14101 f->state = I40E_FILTER_NEW;
14102 f_count++;
14103 }
14104 spin_unlock_bh(&vsi->mac_filter_hash_lock);
14105 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
14107 if (f_count) {
14108 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
14109 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
14110 }
14112 /* Update VSI BW information */
14113 ret = i40e_vsi_get_bw_info(vsi);
14114 if (ret) {
14115 dev_info(&pf->pdev->dev,
14116 "couldn't get vsi bw info, err %pe aq_err %s\n",
14117 ERR_PTR(ret),
14118 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14119 /* VSI is already added so not tearing that up */
14120 ret = 0;
14121 }
14123 err:
14124 return ret;
14125 }
14128 * i40e_vsi_release - Delete a VSI and free its resources
14129 * @vsi: the VSI being removed
14131 * Returns 0 on success or < 0 on error
14133 int i40e_vsi_release(struct i40e_vsi *vsi)
14135 struct i40e_mac_filter *f;
14136 struct hlist_node *h;
14137 struct i40e_veb *veb = NULL;
14138 struct i40e_pf *pf;
14139 u16 uplink_seid;
14140 int i, n, bkt;
14142 pf = vsi->back;
14144 /* release of a VEB-owner or last VSI is not allowed */
14145 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
14146 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
14147 vsi->seid, vsi->uplink_seid);
14148 return -ENODEV;
14149 }
14150 if (vsi == pf->vsi[pf->lan_vsi] &&
14151 !test_bit(__I40E_DOWN, pf->state)) {
14152 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
14153 return -ENODEV;
14154 }
14155 set_bit(__I40E_VSI_RELEASING, vsi->state);
14156 uplink_seid = vsi->uplink_seid;
14158 if (vsi->type != I40E_VSI_SRIOV) {
14159 if (vsi->netdev_registered) {
14160 vsi->netdev_registered = false;
14161 if (vsi->netdev) {
14162 /* results in a call to i40e_close() */
14163 unregister_netdev(vsi->netdev);
14164 }
14165 } else {
14166 i40e_vsi_close(vsi);
14168 i40e_vsi_disable_irq(vsi);
14169 }
14171 if (vsi->type == I40E_VSI_MAIN)
14172 i40e_devlink_destroy_port(pf);
14174 spin_lock_bh(&vsi->mac_filter_hash_lock);
14176 /* clear the sync flag on all filters */
14177 if (vsi->netdev) {
14178 __dev_uc_unsync(vsi->netdev, NULL);
14179 __dev_mc_unsync(vsi->netdev, NULL);
14180 }
14182 /* make sure any remaining filters are marked for deletion */
14183 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
14184 __i40e_del_filter(vsi, f);
14186 spin_unlock_bh(&vsi->mac_filter_hash_lock);
14188 i40e_sync_vsi_filters(vsi);
14190 i40e_vsi_delete(vsi);
14191 i40e_vsi_free_q_vectors(vsi);
14192 if (vsi->netdev) {
14193 free_netdev(vsi->netdev);
14194 vsi->netdev = NULL;
14195 }
14196 i40e_vsi_clear_rings(vsi);
14197 i40e_vsi_clear(vsi);
14199 /* If this was the last thing on the VEB, except for the
14200 * controlling VSI, remove the VEB, which puts the controlling
14201 * VSI onto the next level down in the switch.
14203 * Well, okay, there's one more exception here: don't remove
14204 * the orphan VEBs yet. We'll wait for an explicit remove request
14205 * from up the network stack.
14207 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
14208 if (pf->vsi[i] &&
14209 pf->vsi[i]->uplink_seid == uplink_seid &&
14210 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
14211 n++; /* count the VSIs */
14212 }
14213 }
14214 for (i = 0; i < I40E_MAX_VEB; i++) {
14215 if (!pf->veb[i])
14216 continue;
14217 if (pf->veb[i]->uplink_seid == uplink_seid)
14218 n++; /* count the VEBs */
14219 if (pf->veb[i]->seid == uplink_seid)
14220 veb = pf->veb[i];
14221 }
14222 if (n == 0 && veb && veb->uplink_seid != 0)
14223 i40e_veb_release(veb);
14225 return 0;
14226 }
14229 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
14230 * @vsi: ptr to the VSI
14232 * This should only be called after i40e_vsi_mem_alloc() which allocates the
14233 * corresponding SW VSI structure and initializes num_queue_pairs for the
14234 * newly allocated VSI.
14236 * Returns 0 on success or negative on failure
14238 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
14240 int ret = -ENOENT;
14241 struct i40e_pf *pf = vsi->back;
14243 if (vsi->q_vectors[0]) {
14244 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
14245 vsi->seid);
14246 return -EEXIST;
14247 }
14249 if (vsi->base_vector) {
14250 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
14251 vsi->seid, vsi->base_vector);
14252 return -EEXIST;
14253 }
14255 ret = i40e_vsi_alloc_q_vectors(vsi);
14256 if (ret) {
14257 dev_info(&pf->pdev->dev,
14258 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
14259 vsi->num_q_vectors, vsi->seid, ret);
14260 vsi->num_q_vectors = 0;
14261 goto vector_setup_out;
14262 }
14264 /* In Legacy mode, we do not have to get any other vector since we
14265 * piggyback on the misc/ICR0 for queue interrupts.
14267 if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
14268 return ret;
14269 if (vsi->num_q_vectors)
14270 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
14271 vsi->num_q_vectors, vsi->idx);
14272 if (vsi->base_vector < 0) {
14273 dev_info(&pf->pdev->dev,
14274 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
14275 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
14276 i40e_vsi_free_q_vectors(vsi);
14277 ret = -ENOENT;
14278 goto vector_setup_out;
14279 }
14281 vector_setup_out:
14282 return ret;
14283 }
14286 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
14287 * @vsi: pointer to the vsi.
14289 * This re-allocates a vsi's queue resources.
14291 * Returns pointer to the successfully allocated and configured VSI sw struct
14292 * on success, otherwise returns NULL on failure.
14294 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
14296 u16 alloc_queue_pairs;
14297 struct i40e_pf *pf;
14298 u8 enabled_tc;
14299 int ret;
14301 if (!vsi)
14302 return NULL;
14304 pf = vsi->back;
14306 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
14307 i40e_vsi_clear_rings(vsi);
14309 i40e_vsi_free_arrays(vsi, false);
14310 i40e_set_num_rings_in_vsi(vsi);
14311 ret = i40e_vsi_alloc_arrays(vsi, false);
14312 if (ret)
14313 goto err_vsi;
14315 alloc_queue_pairs = vsi->alloc_queue_pairs *
14316 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14318 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14319 if (ret < 0) {
14320 dev_info(&pf->pdev->dev,
14321 "failed to get tracking for %d queues for VSI %d err %d\n",
14322 alloc_queue_pairs, vsi->seid, ret);
14323 goto err_vsi;
14324 }
14325 vsi->base_queue = ret;
14327 /* Update the FW view of the VSI. Force a reset of TC and queue
14328 * layout configurations.
14330 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14331 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14332 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14333 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14334 if (vsi->type == I40E_VSI_MAIN)
14335 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
14337 /* assign it some queues */
14338 ret = i40e_alloc_rings(vsi);
14339 if (ret)
14340 goto err_rings;
14342 /* map all of the rings to the q_vectors */
14343 i40e_vsi_map_rings_to_vectors(vsi);
14344 return vsi;
14346 err_rings:
14347 i40e_vsi_free_q_vectors(vsi);
14348 if (vsi->netdev_registered) {
14349 vsi->netdev_registered = false;
14350 unregister_netdev(vsi->netdev);
14351 free_netdev(vsi->netdev);
14352 vsi->netdev = NULL;
14354 if (vsi->type == I40E_VSI_MAIN)
14355 i40e_devlink_destroy_port(pf);
14356 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14357 err_vsi:
14358 i40e_vsi_clear(vsi);
14359 return NULL;
14360 }
14363 * i40e_vsi_setup - Set up a VSI by a given type
14364 * @pf: board private structure
14366 * @uplink_seid: the switch element to link to
14367 * @param1: usage depends upon VSI type. For VF types, indicates VF id
14369 * This allocates the sw VSI structure and its queue resources, then adds a VSI
14370 * to the identified VEB.
14372 * Returns pointer to the successfully allocated and configured VSI sw struct on
14373 * success, otherwise returns NULL on failure.
14375 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
14376 u16 uplink_seid, u32 param1)
14378 struct i40e_vsi *vsi = NULL;
14379 struct i40e_veb *veb = NULL;
14380 u16 alloc_queue_pairs;
14381 int ret = -ENOENT;
14382 int v_idx, i;
14384 /* The requested uplink_seid must be either
14385 * - the PF's port seid
14386 * no VEB is needed because this is the PF
14387 * or this is a Flow Director special case VSI
14388 * - seid of an existing VEB
14389 * - seid of a VSI that owns an existing VEB
14390 * - seid of a VSI that doesn't own a VEB
14391 * a new VEB is created and the VSI becomes the owner
14392 * - seid of the PF VSI, which is what creates the first VEB
14393 * this is a special case of the previous
14395 * Find which uplink_seid we were given and create a new VEB if needed
14397 for (i = 0; i < I40E_MAX_VEB; i++) {
14398 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
14399 veb = pf->veb[i];
14400 break;
14401 }
14402 }
14404 if (!veb && uplink_seid != pf->mac_seid) {
14406 for (i = 0; i < pf->num_alloc_vsi; i++) {
14407 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
14408 vsi = pf->vsi[i];
14409 break;
14410 }
14411 }
14412 if (!vsi) {
14413 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
14414 uplink_seid);
14415 return NULL;
14416 }
14418 if (vsi->uplink_seid == pf->mac_seid)
14419 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
14420 vsi->tc_config.enabled_tc);
14421 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
14422 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
14423 vsi->tc_config.enabled_tc);
14424 if (veb) {
14425 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
14426 dev_info(&vsi->back->pdev->dev,
14427 "New VSI creation error, uplink seid of LAN VSI expected.\n");
14430 /* We come up by default in VEPA mode if SRIOV is not
14431 * already enabled, in which case we can't force VEPA
14434 if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
14435 veb->bridge_mode = BRIDGE_MODE_VEPA;
14436 clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
14437 }
14438 i40e_config_bridge_mode(veb);
14439 }
14440 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
14441 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
14442 veb = pf->veb[i];
14443 }
14444 if (!veb) {
14445 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
14446 return NULL;
14447 }
14449 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14450 uplink_seid = veb->seid;
14451 }
14453 /* get vsi sw struct */
14454 v_idx = i40e_vsi_mem_alloc(pf, type);
14455 if (v_idx < 0)
14456 goto err_alloc;
14457 vsi = pf->vsi[v_idx];
14458 if (!vsi)
14459 goto err_alloc;
14460 vsi->type = type;
14461 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
14463 if (type == I40E_VSI_MAIN)
14464 pf->lan_vsi = v_idx;
14465 else if (type == I40E_VSI_SRIOV)
14466 vsi->vf_id = param1;
14467 /* assign it some queues */
14468 alloc_queue_pairs = vsi->alloc_queue_pairs *
14469 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14471 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14472 if (ret < 0) {
14473 dev_info(&pf->pdev->dev,
14474 "failed to get tracking for %d queues for VSI %d err=%d\n",
14475 alloc_queue_pairs, vsi->seid, ret);
14476 goto err_vsi;
14477 }
14478 vsi->base_queue = ret;
14480 /* get a VSI from the hardware */
14481 vsi->uplink_seid = uplink_seid;
14482 ret = i40e_add_vsi(vsi);
14483 if (ret)
14484 goto err_vsi;
14486 switch (vsi->type) {
14487 /* setup the netdev if needed */
14488 case I40E_VSI_MAIN:
14489 case I40E_VSI_VMDQ2:
14490 ret = i40e_config_netdev(vsi);
14491 if (ret)
14492 goto err_netdev;
14493 ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
14494 if (ret)
14495 goto err_netdev;
14496 if (vsi->type == I40E_VSI_MAIN) {
14497 ret = i40e_devlink_create_port(pf);
14498 if (ret)
14499 goto err_netdev;
14500 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
14501 }
14502 ret = register_netdev(vsi->netdev);
14503 if (ret)
14504 goto err_dl_port;
14505 vsi->netdev_registered = true;
14506 netif_carrier_off(vsi->netdev);
14507 #ifdef CONFIG_I40E_DCB
14508 /* Setup DCB netlink interface */
14509 i40e_dcbnl_setup(vsi);
14510 #endif /* CONFIG_I40E_DCB */
14511 fallthrough;
14512 case I40E_VSI_FDIR:
14513 /* set up vectors and rings if needed */
14514 ret = i40e_vsi_setup_vectors(vsi);
14515 if (ret)
14516 goto err_msix;
14518 ret = i40e_alloc_rings(vsi);
14519 if (ret)
14520 goto err_rings;
14522 /* map all of the rings to the q_vectors */
14523 i40e_vsi_map_rings_to_vectors(vsi);
14525 i40e_vsi_reset_stats(vsi);
14526 break;
14527 default:
14528 /* no netdev or rings for the other VSI types */
14529 break;
14530 }
14532 if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) &&
14533 vsi->type == I40E_VSI_VMDQ2) {
14534 ret = i40e_vsi_config_rss(vsi);
14535 if (ret)
14536 goto err_config;
14537 }
14538 return vsi;
14540 err_config:
14541 i40e_vsi_clear_rings(vsi);
14542 err_rings:
14543 i40e_vsi_free_q_vectors(vsi);
14544 err_msix:
14545 if (vsi->netdev_registered) {
14546 vsi->netdev_registered = false;
14547 unregister_netdev(vsi->netdev);
14548 free_netdev(vsi->netdev);
14549 vsi->netdev = NULL;
14550 }
14551 err_dl_port:
14552 if (vsi->type == I40E_VSI_MAIN)
14553 i40e_devlink_destroy_port(pf);
14554 err_netdev:
14555 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14556 err_vsi:
14557 i40e_vsi_clear(vsi);
14558 err_alloc:
14559 return NULL;
14560 }
14563 * i40e_veb_get_bw_info - Query VEB BW information
14564 * @veb: the veb to query
14566 * Query the Tx scheduler BW configuration data for given VEB
14568 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
14570 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
14571 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
14572 struct i40e_pf *pf = veb->pf;
14573 struct i40e_hw *hw = &pf->hw;
14574 u32 tc_bw_max;
14575 int ret, i;
14578 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
14579 &bw_data, NULL);
14580 if (ret) {
14581 dev_info(&pf->pdev->dev,
14582 "query veb bw config failed, err %pe aq_err %s\n",
14583 ERR_PTR(ret),
14584 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14585 goto out;
14586 }
14588 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
14589 &ets_data, NULL);
14590 if (ret) {
14591 dev_info(&pf->pdev->dev,
14592 "query veb bw ets config failed, err %pe aq_err %s\n",
14593 ERR_PTR(ret),
14594 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14595 goto out;
14596 }
14598 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
14599 veb->bw_max_quanta = ets_data.tc_bw_max;
14600 veb->is_abs_credits = bw_data.absolute_credits_enable;
14601 veb->enabled_tc = ets_data.tc_valid_bits;
14602 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
14603 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
14604 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
14605 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
14606 veb->bw_tc_limit_credits[i] =
14607 le16_to_cpu(bw_data.tc_bw_limits[i]);
14608 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
14609 }
14611 out:
14612 return ret;
14613 }
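/* tc_bw_max packs eight 4-bit quanta fields into two little-endian words,
 * which the two le16_to_cpu() calls above splice into one u32 before the
 * (tc_bw_max >> (i * 4)) & 0x7 extraction of the 3 valid bits per TC.
 */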
14616 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
14617 * @pf: board private structure
14619 * On error: returns error code (negative)
14620 * On success: returns vsi index in PF (positive)
14622 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
14623 {
14624 int ret = -ENOENT;
14625 struct i40e_veb *veb;
14626 int i;
14628 /* Need to protect the allocation of switch elements at the PF level */
14629 mutex_lock(&pf->switch_mutex);
14631 /* VEB list may be fragmented if VEB creation/destruction has
14632 * been happening. We can afford to do a quick scan to look
14633 * for any free slots in the list.
14635 * find next empty veb slot, looping back around if necessary
14636 */
14637 i = 0;
14638 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
14639 i++;
14640 if (i >= I40E_MAX_VEB) {
14641 ret = -ENOMEM;
14642 goto err_alloc_veb; /* out of VEB slots! */
14643 }
14645 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
14646 if (!veb) {
14647 ret = -ENOMEM;
14648 goto err_alloc_veb;
14649 }
14650 veb->pf = pf;
14651 veb->idx = i;
14652 veb->enabled_tc = 1;
14654 pf->veb[i] = veb;
14655 ret = i;
14656 err_alloc_veb:
14657 mutex_unlock(&pf->switch_mutex);
14658 return ret;
14659 }
14662 * i40e_switch_branch_release - Delete a branch of the switch tree
14663 * @branch: where to start deleting
14665 * This uses recursion to find the tips of the branch to be
14666 * removed, deleting until we get back to and can delete this VEB.
14668 static void i40e_switch_branch_release(struct i40e_veb *branch)
14670 struct i40e_pf *pf = branch->pf;
14671 u16 branch_seid = branch->seid;
14672 u16 veb_idx = branch->idx;
14675 /* release any VEBs on this VEB - RECURSION */
14676 for (i = 0; i < I40E_MAX_VEB; i++) {
14677 if (!pf->veb[i])
14678 continue;
14679 if (pf->veb[i]->uplink_seid == branch->seid)
14680 i40e_switch_branch_release(pf->veb[i]);
14681 }
14683 /* Release the VSIs on this VEB, but not the owner VSI.
14685 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
14686 * the VEB itself, so don't use (*branch) after this loop.
14688 for (i = 0; i < pf->num_alloc_vsi; i++) {
14689 if (!pf->vsi[i])
14690 continue;
14691 if (pf->vsi[i]->uplink_seid == branch_seid &&
14692 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
14693 i40e_vsi_release(pf->vsi[i]);
14694 }
14695 }
14697 /* There's one corner case where the VEB might not have been
14698 * removed, so double check it here and remove it if needed.
14699 * This case happens if the veb was created from the debugfs
14700 * commands and no VSIs were added to it.
14702 if (pf->veb[veb_idx])
14703 i40e_veb_release(pf->veb[veb_idx]);
14704 }
14707 * i40e_veb_clear - remove veb struct
14708 * @veb: the veb to remove
14710 static void i40e_veb_clear(struct i40e_veb *veb)
14711 {
14712 if (!veb)
14713 return;
14715 if (veb->pf) {
14716 struct i40e_pf *pf = veb->pf;
14718 mutex_lock(&pf->switch_mutex);
14719 if (pf->veb[veb->idx] == veb)
14720 pf->veb[veb->idx] = NULL;
14721 mutex_unlock(&pf->switch_mutex);
14722 }
14724 kfree(veb);
14725 }
14728 * i40e_veb_release - Delete a VEB and free its resources
14729 * @veb: the VEB being removed
14731 void i40e_veb_release(struct i40e_veb *veb)
14733 struct i40e_vsi *vsi = NULL;
14734 struct i40e_pf *pf;
14735 int i, n = 0;
14737 pf = veb->pf;
14739 /* find the remaining VSI and check for extras */
14740 for (i = 0; i < pf->num_alloc_vsi; i++) {
14741 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
14742 n++;
14743 vsi = pf->vsi[i];
14744 }
14745 }
14746 if (n != 1) {
14747 dev_info(&pf->pdev->dev,
14748 "can't remove VEB %d with %d VSIs left\n",
14753 /* move the remaining VSI to uplink veb */
14754 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
14755 if (veb->uplink_seid) {
14756 vsi->uplink_seid = veb->uplink_seid;
14757 if (veb->uplink_seid == pf->mac_seid)
14758 vsi->veb_idx = I40E_NO_VEB;
14759 else
14760 vsi->veb_idx = veb->veb_idx;
14761 } else {
14762 /* floating VEB */
14763 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
14764 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
14765 }
14767 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14768 i40e_veb_clear(veb);
14772 * i40e_add_veb - create the VEB in the switch
14773 * @veb: the VEB to be instantiated
14774 * @vsi: the controlling VSI
14776 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
14778 struct i40e_pf *pf = veb->pf;
14779 bool enable_stats = !!test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
14780 int ret;
14782 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
14783 veb->enabled_tc, false,
14784 &veb->seid, enable_stats, NULL);
14786 /* get a VEB from the hardware */
14787 if (ret) {
14788 dev_info(&pf->pdev->dev,
14789 "couldn't add VEB, err %pe aq_err %s\n",
14790 ERR_PTR(ret),
14791 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14792 return -EPERM;
14793 }
14795 /* get statistics counter */
14796 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
14797 &veb->stats_idx, NULL, NULL, NULL);
14798 if (ret) {
14799 dev_info(&pf->pdev->dev,
14800 "couldn't get VEB statistics idx, err %pe aq_err %s\n",
14801 ERR_PTR(ret),
14802 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14803 return -EPERM;
14804 }
14805 ret = i40e_veb_get_bw_info(veb);
14806 if (ret) {
14807 dev_info(&pf->pdev->dev,
14808 "couldn't get VEB bw info, err %pe aq_err %s\n",
14809 ERR_PTR(ret),
14810 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14811 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14812 return -ENOENT;
14813 }
14815 vsi->uplink_seid = veb->seid;
14816 vsi->veb_idx = veb->idx;
14817 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14819 return 0;
14820 }
14823 * i40e_veb_setup - Set up a VEB
14824 * @pf: board private structure
14825 * @flags: VEB setup flags
14826 * @uplink_seid: the switch element to link to
14827 * @vsi_seid: the initial VSI seid
14828 * @enabled_tc: Enabled TC bit-map
14830 * This allocates the sw VEB structure and links it into the switch
14831 * It is possible and legal for this to be a duplicate of an already
14832 * existing VEB. It is also possible for both uplink and vsi seids
14833 * to be zero, in order to create a floating VEB.
14835 * Returns pointer to the successfully allocated VEB sw struct on
14836 * success, otherwise returns NULL on failure.
14838 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
14839 u16 uplink_seid, u16 vsi_seid,
14842 struct i40e_veb *veb, *uplink_veb = NULL;
14843 int vsi_idx, veb_idx;
14844 int ret;
14846 /* if one seid is 0, the other must be 0 to create a floating relay */
14847 if ((uplink_seid == 0 || vsi_seid == 0) &&
14848 (uplink_seid + vsi_seid != 0)) {
14849 dev_info(&pf->pdev->dev,
14850 "one, not both seid's are 0: uplink=%d vsi=%d\n",
14851 uplink_seid, vsi_seid);
14852 return NULL;
14853 }
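/* Illustrative call: passing both seids as zero, e.g.
 * i40e_veb_setup(pf, 0, 0, 0, 0), creates a floating VEB with no uplink
 * and no owner VSI, which is why a single zero seid is rejected above but
 * a pair of zeros is allowed.
 */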
14855 /* make sure there is such a vsi and uplink */
14856 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
14857 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
14858 break;
14859 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
14860 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
14861 vsi_seid);
14862 return NULL;
14863 }
14865 if (uplink_seid && uplink_seid != pf->mac_seid) {
14866 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
14867 if (pf->veb[veb_idx] &&
14868 pf->veb[veb_idx]->seid == uplink_seid) {
14869 uplink_veb = pf->veb[veb_idx];
14870 break;
14871 }
14872 }
14873 if (!uplink_veb) {
14874 dev_info(&pf->pdev->dev,
14875 "uplink seid %d not found\n", uplink_seid);
14880 /* get veb sw struct */
14881 veb_idx = i40e_veb_mem_alloc(pf);
14882 if (veb_idx < 0)
14883 goto err_alloc;
14884 veb = pf->veb[veb_idx];
14885 veb->flags = flags;
14886 veb->uplink_seid = uplink_seid;
14887 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
14888 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
14890 /* create the VEB in the switch */
14891 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
14892 if (ret)
14893 goto err_veb;
14894 if (vsi_idx == pf->lan_vsi)
14895 pf->lan_veb = veb->idx;
14897 return veb;
14899 err_veb:
14900 i40e_veb_clear(veb);
14901 err_alloc:
14902 return NULL;
14903 }
14906 * i40e_setup_pf_switch_element - set PF vars based on switch type
14907 * @pf: board private structure
14908 * @ele: element we are building info from
14909 * @num_reported: total number of elements
14910 * @printconfig: should we print the contents
14912 * helper function to assist in extracting a few useful SEID values.
14914 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
14915 struct i40e_aqc_switch_config_element_resp *ele,
14916 u16 num_reported, bool printconfig)
14918 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
14919 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
14920 u8 element_type = ele->element_type;
14921 u16 seid = le16_to_cpu(ele->seid);
14923 if (printconfig)
14924 dev_info(&pf->pdev->dev,
14925 "type=%d seid=%d uplink=%d downlink=%d\n",
14926 element_type, seid, uplink_seid, downlink_seid);
14928 switch (element_type) {
14929 case I40E_SWITCH_ELEMENT_TYPE_MAC:
14930 pf->mac_seid = seid;
14932 case I40E_SWITCH_ELEMENT_TYPE_VEB:
14934 if (uplink_seid != pf->mac_seid)
14935 break;
14936 if (pf->lan_veb >= I40E_MAX_VEB) {
14939 /* find existing or else empty VEB */
14940 for (v = 0; v < I40E_MAX_VEB; v++) {
14941 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
14946 if (pf->lan_veb >= I40E_MAX_VEB) {
14947 v = i40e_veb_mem_alloc(pf);
14953 if (pf->lan_veb >= I40E_MAX_VEB)
14956 pf->veb[pf->lan_veb]->seid = seid;
14957 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
14958 pf->veb[pf->lan_veb]->pf = pf;
14959 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
14961 case I40E_SWITCH_ELEMENT_TYPE_VSI:
14962 if (num_reported != 1)
14964 /* This is immediately after a reset so we can assume this is the PF's main VSI
14967 pf->mac_seid = uplink_seid;
14968 pf->main_vsi_seid = seid;
14970 dev_info(&pf->pdev->dev,
14971 "pf_seid=%d main_vsi_seid=%d\n",
14972 downlink_seid, pf->main_vsi_seid);
14974 case I40E_SWITCH_ELEMENT_TYPE_PF:
14975 case I40E_SWITCH_ELEMENT_TYPE_VF:
14976 case I40E_SWITCH_ELEMENT_TYPE_EMP:
14977 case I40E_SWITCH_ELEMENT_TYPE_BMC:
14978 case I40E_SWITCH_ELEMENT_TYPE_PE:
14979 case I40E_SWITCH_ELEMENT_TYPE_PA:
14980 /* ignore these for now */
14983 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
14984 element_type, seid);
14990 * i40e_fetch_switch_configuration - Get switch config from firmware
14991 * @pf: board private structure
14992 * @printconfig: should we print the contents
14994 * Get the current switch configuration from the device and
14995 * extract a few useful SEID values.
14997 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
14999 struct i40e_aqc_get_switch_config_resp *sw_config;
15005 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
15009 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
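/* the AQ returns the switch config in pages; keep fetching until next_seid comes back as 0 */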
15011 u16 num_reported, num_total;
15013 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
15017 dev_info(&pf->pdev->dev,
15018 "get switch config failed err %d aq_err %s\n",
15020 i40e_aq_str(&pf->hw,
15021 pf->hw.aq.asq_last_status));
15026 num_reported = le16_to_cpu(sw_config->header.num_reported);
15027 num_total = le16_to_cpu(sw_config->header.num_total);
15030 dev_info(&pf->pdev->dev,
15031 "header: %d reported %d total\n",
15032 num_reported, num_total);
15034 for (i = 0; i < num_reported; i++) {
15035 struct i40e_aqc_switch_config_element_resp *ele =
15036 &sw_config->element[i];
15038 i40e_setup_pf_switch_element(pf, ele, num_reported,
15041 } while (next_seid != 0);
15048 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
15049 * @pf: board private structure
15050 * @reinit: if the Main VSI needs to be re-initialized.
15051 * @lock_acquired: indicates whether or not the lock has been acquired
15053 * Returns 0 on success, negative value on failure
15055 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
15060 /* find out what's out there already */
15061 ret = i40e_fetch_switch_configuration(pf, false);
15063 dev_info(&pf->pdev->dev,
15064 "couldn't fetch switch config, err %pe aq_err %s\n",
15066 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15069 i40e_pf_reset_stats(pf);
15071 /* set the switch config bit for the whole device to
15072 * support limited promisc or true promisc
15073 * when user requests promisc. The default is limited promisc.
15077 if ((pf->hw.pf_id == 0) &&
15078 !test_bit(I40E_FLAG_TRUE_PROMISC_ENA, pf->flags)) {
15079 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
15080 pf->last_sw_conf_flags = flags;
15083 if (pf->hw.pf_id == 0) {
15086 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
15087 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
15089 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
15090 dev_info(&pf->pdev->dev,
15091 "couldn't set switch config bits, err %pe aq_err %s\n",
15093 i40e_aq_str(&pf->hw,
15094 pf->hw.aq.asq_last_status));
15095 /* not a fatal problem, just keep going */
15097 pf->last_sw_conf_valid_flags = valid_flags;
15100 /* first time setup */
15101 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
15102 struct i40e_vsi *vsi = NULL;
15105 /* Set up the PF VSI associated with the PF's main VSI
15106 * that is already in the HW switch
15108 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
15109 uplink_seid = pf->veb[pf->lan_veb]->seid;
15111 uplink_seid = pf->mac_seid;
15112 if (pf->lan_vsi == I40E_NO_VSI)
15113 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
15115 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
15117 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
15118 i40e_cloud_filter_exit(pf);
15119 i40e_fdir_teardown(pf);
15123 /* force a reset of TC and queue layout configurations */
15124 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
15126 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
15127 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
15128 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
15130 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
15132 i40e_fdir_sb_setup(pf);
15134 /* Setup static PF queue filter control settings */
15135 ret = i40e_setup_pf_filter_control(pf);
15137 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
15139 /* Failure here should not stop continuing other steps */
15142 /* enable RSS in the HW, even for only one queue, as the stack can use the hash
15145 if (test_bit(I40E_FLAG_RSS_ENA, pf->flags))
15146 i40e_pf_config_rss(pf);
15148 /* fill in link information and enable LSE reporting */
15149 i40e_link_event(pf);
15153 if (!lock_acquired)
15156 /* repopulate tunnel port filters */
15157 udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
15159 if (!lock_acquired)
15166 * i40e_determine_queue_usage - Work out queue distribution
15167 * @pf: board private structure
15169 static void i40e_determine_queue_usage(struct i40e_pf *pf)
15174 pf->num_lan_qps = 0;
15176 /* Find the max queues to be put into basic use. We'll always be
15177 * using TC0, whether or not DCB is running, and TC0 will get the bulk of the queues.
15180 queues_left = pf->hw.func_caps.num_tx_qp;
15182 if ((queues_left == 1) ||
15183 !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
15184 /* one qp for PF, no queues for anything else */
15186 pf->alloc_rss_size = pf->num_lan_qps = 1;
15188 /* make sure all the fancies are disabled */
15189 clear_bit(I40E_FLAG_RSS_ENA, pf->flags);
15190 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
15191 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
15192 clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
15193 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
15194 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
15195 clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
15196 clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
15197 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
15198 } else if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags) &&
15199 !test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
15200 !test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
15201 !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) {
15202 /* one qp for PF */
15203 pf->alloc_rss_size = pf->num_lan_qps = 1;
15204 queues_left -= pf->num_lan_qps;
15206 clear_bit(I40E_FLAG_RSS_ENA, pf->flags);
15207 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
15208 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
15209 clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
15210 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
15211 clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
15212 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
15214 /* Not enough queues for all TCs */
15215 if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags) &&
15216 queues_left < I40E_MAX_TRAFFIC_CLASS) {
15217 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
15218 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
15219 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
15222 /* lan qps: the larger of rss_size_max and online CPUs, capped by HW queues and MSI-X vectors */
15223 q_max = max_t(int, pf->rss_size_max, num_online_cpus());
15224 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
15225 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
15226 pf->num_lan_qps = q_max;
15228 queues_left -= pf->num_lan_qps;
15231 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
15232 if (queues_left > 1) {
15233 queues_left -= 1; /* save 1 queue for FD */
15235 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
15236 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
15237 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
15241 if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) &&
15242 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
15243 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
15244 (queues_left / pf->num_vf_qps));
15245 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
15248 if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) &&
15249 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
15250 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
15251 (queues_left / pf->num_vmdq_qps));
15252 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
15255 pf->queues_left = queues_left;
15256 dev_dbg(&pf->pdev->dev,
15257 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
15258 pf->hw.func_caps.num_tx_qp,
15259 !!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags),
15260 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
15261 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
15266 * i40e_setup_pf_filter_control - Setup PF static filter control
15267 * @pf: PF to be setup
15269 * i40e_setup_pf_filter_control sets up a PF's initial filter control
15270 * settings. If PE/FCoE are enabled then it will also set the per PF
15271 * based filter sizes required for them. It also enables Flow director,
15272 * ethertype and macvlan type filter settings for the pf.
15274 * Returns 0 on success, negative on failure
15276 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
15278 struct i40e_filter_control_settings *settings = &pf->filter_settings;
15280 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
15282 /* Flow Director is enabled */
15283 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ||
15284 test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
15285 settings->enable_fdir = true;
15287 /* Ethtype and MACVLAN filters enabled for PF */
15288 settings->enable_ethtype = true;
15289 settings->enable_macvlan = true;
15291 if (i40e_set_filter_control(&pf->hw, settings))
15297 #define INFO_STRING_LEN 255
15298 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
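/* scnprintf() returns the number of bytes actually written (never more than the size passed in), so REMAIN(i) cannot go negative */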
15299 static void i40e_print_features(struct i40e_pf *pf)
15301 struct i40e_hw *hw = &pf->hw;
15305 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
15309 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
15310 #ifdef CONFIG_PCI_IOV
15311 i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
15313 i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
15314 pf->hw.func_caps.num_vsis,
15315 pf->vsi[pf->lan_vsi]->num_queue_pairs);
15316 if (test_bit(I40E_FLAG_RSS_ENA, pf->flags))
15317 i += scnprintf(&buf[i], REMAIN(i), " RSS");
15318 if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
15319 i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
15320 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
15321 i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
15322 i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
15324 if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
15325 i += scnprintf(&buf[i], REMAIN(i), " DCB");
15326 i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
15327 i += scnprintf(&buf[i], REMAIN(i), " Geneve");
15328 if (test_bit(I40E_FLAG_PTP_ENA, pf->flags))
15329 i += scnprintf(&buf[i], REMAIN(i), " PTP");
15330 if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
15331 i += scnprintf(&buf[i], REMAIN(i), " VEB");
15333 i += scnprintf(&buf[i], REMAIN(i), " VEPA");
15335 dev_info(&pf->pdev->dev, "%s\n", buf);
15337 WARN_ON(i > INFO_STRING_LEN);
15341 * i40e_get_platform_mac_addr - get platform-specific MAC address
15342 * @pdev: PCI device information struct
15343 * @pf: board private structure
15345 * Look up the MAC address for the device. First we'll try
15346 * eth_platform_get_mac_address, which will check Open Firmware, or arch
15347 * specific fallback. Otherwise, we'll default to the stored value in firmware.
15350 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
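/* eth_platform_get_mac_address() returns 0 on success; a nonzero return means no platform-provided address, so fall back to the NVM-stored MAC */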
15352 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
15353 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
15357 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
15358 * @fec_cfg: FEC option to set in flags
15359 * @flags: ptr to flags in which we set FEC option
15361 void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags)
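/* AUTO advertises both FEC modes; an explicit RS or BASE-R request selects one mode and clears the other */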
15363 if (fec_cfg & I40E_AQ_SET_FEC_AUTO) {
15364 set_bit(I40E_FLAG_RS_FEC, flags);
15365 set_bit(I40E_FLAG_BASE_R_FEC, flags);
15367 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
15368 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
15369 set_bit(I40E_FLAG_RS_FEC, flags);
15370 clear_bit(I40E_FLAG_BASE_R_FEC, flags);
15372 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
15373 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
15374 set_bit(I40E_FLAG_BASE_R_FEC, flags);
15375 clear_bit(I40E_FLAG_RS_FEC, flags);
15377 if (fec_cfg == 0) {
15378 clear_bit(I40E_FLAG_RS_FEC, flags);
15379 clear_bit(I40E_FLAG_BASE_R_FEC, flags);
15384 * i40e_check_recovery_mode - check if we are running transition firmware
15385 * @pf: board private structure
15387 * Check registers indicating the firmware runs in recovery mode. Sets the
15388 * appropriate driver state.
15390 * Returns true if the recovery mode was detected, false otherwise
15392 static bool i40e_check_recovery_mode(struct i40e_pf *pf)
15394 u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
15396 if (val & I40E_GL_FWSTS_FWS1B_MASK) {
15397 dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
15398 dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
15399 set_bit(__I40E_RECOVERY_MODE, pf->state);
15403 if (test_bit(__I40E_RECOVERY_MODE, pf->state))
15404 dev_info(&pf->pdev->dev, "Please perform a Power-On Reset to initialize the adapter in normal mode with full functionality.\n");
15410 * i40e_pf_loop_reset - perform reset in a loop.
15411 * @pf: board private structure
15413 * This function is useful when a NIC is about to enter recovery mode.
15414 * When a NIC's internal data structures are corrupted its firmware
15415 * will enter recovery mode.
15416 * Right after a POR it takes about 7 minutes for the firmware to enter
15417 * recovery mode. Until then the NIC is in an intermediate state; after
15418 * that period it almost surely enters recovery mode. The only way for
15419 * the driver to detect the intermediate state is to issue a series of
15420 * PF resets and check the return value. A successful PF reset can still
15421 * mean the firmware is in recovery mode, so the caller of this code
15422 * must check for recovery mode whenever this function returns success.
15423 * There is a small chance that the firmware hangs in the intermediate
15424 * state forever.
15425 * Since waiting 7 minutes is impractical, this function waits
15426 * 10 seconds and then gives up by returning an error.
15428 * Return 0 on success, negative on failure.
15430 static int i40e_pf_loop_reset(struct i40e_pf *pf)
15432 /* wait max 10 seconds for PF reset to succeed */
15433 const unsigned long time_end = jiffies + 10 * HZ;
15434 struct i40e_hw *hw = &pf->hw;
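/* retry the PF reset until it succeeds or the 10 second budget expires */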
15437 ret = i40e_pf_reset(hw);
15438 while (ret != 0 && time_before(jiffies, time_end)) {
15439 usleep_range(10000, 20000);
15440 ret = i40e_pf_reset(hw);
15446 dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
15452 * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
15453 * @pf: board private structure
15455 * Check FW registers to determine if FW issued unexpected EMP Reset.
15456 * Each time an unexpected EMP Reset occurs, the FW increments
15457 * a counter of unexpected EMP Resets. When the counter reaches 10
15458 * the FW should enter Recovery mode.
15460 * Returns true if FW issued unexpected EMP Reset
15462 static bool i40e_check_fw_empr(struct i40e_pf *pf)
15464 const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
15465 I40E_GL_FWSTS_FWS1B_MASK;
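/* FWS1B values above EMPR_0 and up to EMPR_10 encode the count of unexpected EMP Resets */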
15466 return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
15467 (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
15471 * i40e_handle_resets - handle EMP resets and PF resets
15472 * @pf: board private structure
15474 * Handle both EMP resets and PF resets and conclude whether there are
15475 * any issues regarding these resets. If there are any issues then
15476 * generate a log entry.
15478 * Return 0 if the NIC is healthy, or a negative value when there are issues.
15481 static int i40e_handle_resets(struct i40e_pf *pf)
15483 const int pfr = i40e_pf_loop_reset(pf);
15484 const bool is_empr = i40e_check_fw_empr(pf);
15486 if (is_empr || pfr != 0)
15487 dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
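/* report an EMPR as -EIO; otherwise propagate the PF reset result */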
15489 return is_empr ? -EIO : pfr;
15493 * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
15494 * @pf: board private structure
15495 * @hw: ptr to the hardware info
15497 * This function does a minimal setup of all subsystems needed for running in recovery mode.
15500 * Returns 0 on success, negative on failure
15502 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
15504 struct i40e_vsi *vsi;
15508 pci_set_drvdata(pf->pdev, pf);
15509 pci_save_state(pf->pdev);
15511 /* set up periodic task facility */
15512 timer_setup(&pf->service_timer, i40e_service_timer, 0);
15513 pf->service_timer_period = HZ;
15515 INIT_WORK(&pf->service_task, i40e_service_task);
15516 clear_bit(__I40E_SERVICE_SCHED, pf->state);
15518 err = i40e_init_interrupt_scheme(pf);
15520 goto err_switch_setup;
15522 /* The number of VSIs reported by the FW is the minimum guaranteed
15523 * to us; HW supports far more and we share the remaining pool with
15524 * the other PFs. We allocate space for more than the guarantee with
15525 * the understanding that we might not get them all later.
15527 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15528 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15530 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15532 /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
15533 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15537 goto err_switch_setup;
15540 /* We allocate one VSI, which is the absolute minimum needed
15541 * in order to register the netdev
15543 v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
15546 goto err_switch_setup;
15548 pf->lan_vsi = v_idx;
15549 vsi = pf->vsi[v_idx];
15552 goto err_switch_setup;
15554 vsi->alloc_queue_pairs = 1;
15555 err = i40e_config_netdev(vsi);
15557 goto err_switch_setup;
15558 err = register_netdev(vsi->netdev);
15560 goto err_switch_setup;
15561 vsi->netdev_registered = true;
15562 i40e_dbg_pf_init(pf);
15564 err = i40e_setup_misc_vector_for_recovery_mode(pf);
15566 goto err_switch_setup;
15568 /* tell the firmware that we're starting */
15569 i40e_send_version(pf);
15571 /* since everything's happy, start the service_task timer */
15572 mod_timer(&pf->service_timer,
15573 round_jiffies(jiffies + pf->service_timer_period));
15578 i40e_reset_interrupt_capability(pf);
15579 timer_shutdown_sync(&pf->service_timer);
15580 i40e_shutdown_adminq(hw);
15581 iounmap(hw->hw_addr);
15582 pci_release_mem_regions(pf->pdev);
15583 pci_disable_device(pf->pdev);
15590 * i40e_set_subsystem_device_id - set subsystem device id
15591 * @hw: pointer to the hardware info
15593 * Set PCI subsystem device id either from a pci_dev structure or
15594 * a specific FW register.
15596 static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw)
15598 struct i40e_pf *pf = i40e_hw_to_pf(hw);
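/* prefer the subsystem ID from PCI config space; fall back to the I40E_PFPCI_SUBSYSID register when it reads zero */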
15600 hw->subsystem_device_id = pf->pdev->subsystem_device ?
15601 pf->pdev->subsystem_device :
15602 (ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX);
15606 * i40e_probe - Device initialization routine
15607 * @pdev: PCI device information struct
15608 * @ent: entry in i40e_pci_tbl
15610 * i40e_probe initializes a PF identified by a pci_dev structure.
15611 * The OS initialization, configuring of the PF private structure,
15612 * and a hardware reset occur.
15614 * Returns 0 on success, negative on failure
15616 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
15618 struct i40e_aq_get_phy_abilities_resp abilities;
15619 #ifdef CONFIG_I40E_DCB
15620 enum i40e_get_fw_lldp_status_resp lldp_status;
15621 #endif /* CONFIG_I40E_DCB */
15622 struct i40e_pf *pf;
15623 struct i40e_hw *hw;
15627 #ifdef CONFIG_I40E_DCB
15629 #endif /* CONFIG_I40E_DCB */
15634 err = pci_enable_device_mem(pdev);
15638 /* set up for high or low dma */
15639 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
15641 dev_err(&pdev->dev,
15642 "DMA configuration failed: 0x%x\n", err);
15646 /* set up pci connections */
15647 err = pci_request_mem_regions(pdev, i40e_driver_name);
15649 dev_info(&pdev->dev,
15650 "pci_request_selected_regions failed %d\n", err);
15654 pci_set_master(pdev);
15656 /* Now that we have a PCI connection, we need to do the
15657 * low level device setup. This is primarily setting up
15658 * the Admin Queue structures and then querying for the
15659 * device's current profile information.
15661 pf = i40e_alloc_pf(&pdev->dev);
15668 set_bit(__I40E_DOWN, pf->state);
15672 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
15673 I40E_MAX_CSR_SPACE);
15674 /* We believe that the highest register to read is
15675 * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size
15676 * is not less than that before mapping, to prevent an out-of-bounds access.
15679 if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
15680 dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
15685 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
15686 if (!hw->hw_addr) {
15688 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
15689 (unsigned int)pci_resource_start(pdev, 0),
15690 pf->ioremap_len, err);
15693 hw->vendor_id = pdev->vendor;
15694 hw->device_id = pdev->device;
15695 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
15696 hw->subsystem_vendor_id = pdev->subsystem_vendor;
15697 i40e_set_subsystem_device_id(hw);
15698 hw->bus.device = PCI_SLOT(pdev->devfn);
15699 hw->bus.func = PCI_FUNC(pdev->devfn);
15700 hw->bus.bus_id = pdev->bus->number;
15702 /* Select something other than the 802.1ad ethertype for the
15703 * switch to use internally and drop on ingress.
15705 hw->switch_tag = 0xffff;
15706 hw->first_tag = ETH_P_8021AD;
15707 hw->second_tag = ETH_P_8021Q;
15709 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
15710 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
15711 INIT_LIST_HEAD(&pf->ddp_old_prof);
15713 /* set up the locks for the AQ, do this only once in probe
15714 * and destroy them only once in remove
15716 mutex_init(&hw->aq.asq_mutex);
15717 mutex_init(&hw->aq.arq_mutex);
15719 pf->msg_enable = netif_msg_init(debug,
15724 pf->hw.debug_mask = debug;
15726 /* do a special CORER for clearing PXE mode once at init */
15727 if (hw->revision_id == 0 &&
15728 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
15729 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
15734 i40e_clear_pxe_mode(hw);
15737 /* Reset here to make sure all is clean and to define PF 'n' */
15740 err = i40e_set_mac_type(hw);
15742 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15747 err = i40e_handle_resets(pf);
15751 i40e_check_recovery_mode(pf);
15753 if (is_kdump_kernel()) {
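/* shrink the admin queues to conserve memory in the crash (kdump) kernel */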
15754 hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN;
15755 hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN;
15757 hw->aq.num_arq_entries = I40E_AQ_LEN;
15758 hw->aq.num_asq_entries = I40E_AQ_LEN;
15760 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15761 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15763 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
15765 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
15767 err = i40e_init_shared_code(hw);
15769 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15774 /* set up a default setting for link flow control */
15775 pf->hw.fc.requested_mode = I40E_FC_NONE;
15777 err = i40e_init_adminq(hw);
15780 dev_info(&pdev->dev,
15781 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
15782 hw->aq.api_maj_ver,
15783 hw->aq.api_min_ver,
15784 I40E_FW_API_VERSION_MAJOR,
15785 I40E_FW_MINOR_VERSION(hw));
15787 dev_info(&pdev->dev,
15788 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
15792 i40e_get_oem_version(hw);
15793 i40e_get_pba_string(hw);
15795 /* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
15796 i40e_nvm_version_str(hw, nvm_ver, sizeof(nvm_ver));
15797 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
15798 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
15799 hw->aq.api_maj_ver, hw->aq.api_min_ver, nvm_ver,
15800 hw->vendor_id, hw->device_id, hw->subsystem_vendor_id,
15801 hw->subsystem_device_id);
15803 if (i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR,
15804 I40E_FW_MINOR_VERSION(hw) + 1))
15805 dev_dbg(&pdev->dev,
15806 "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
15807 hw->aq.api_maj_ver,
15808 hw->aq.api_min_ver,
15809 I40E_FW_API_VERSION_MAJOR,
15810 I40E_FW_MINOR_VERSION(hw));
15811 else if (i40e_is_aq_api_ver_lt(hw, 1, 4))
15812 dev_info(&pdev->dev,
15813 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
15814 hw->aq.api_maj_ver,
15815 hw->aq.api_min_ver,
15816 I40E_FW_API_VERSION_MAJOR,
15817 I40E_FW_MINOR_VERSION(hw));
15819 i40e_verify_eeprom(pf);
15821 /* Rev 0 hardware was never productized */
15822 if (hw->revision_id < 1)
15823 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
15825 i40e_clear_pxe_mode(hw);
15827 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
15829 goto err_adminq_setup;
15831 err = i40e_sw_init(pf);
15833 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
15837 if (test_bit(__I40E_RECOVERY_MODE, pf->state))
15838 return i40e_init_recovery_mode(pf, hw);
15840 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
15841 hw->func_caps.num_rx_qp, 0, 0);
15843 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
15844 goto err_init_lan_hmc;
15847 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
15849 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
15851 goto err_configure_lan_hmc;
15854 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
15855 * Ignore error return codes because if it was already disabled via
15856 * hardware settings this will fail
15858 if (test_bit(I40E_HW_CAP_STOP_FW_LLDP, pf->hw.caps)) {
15859 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
15860 i40e_aq_stop_lldp(hw, true, false, NULL);
15863 /* allow a platform config to override the HW addr */
15864 i40e_get_platform_mac_addr(pdev, pf);
15866 if (!is_valid_ether_addr(hw->mac.addr)) {
15867 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
15871 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
15872 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
15873 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
15874 if (is_valid_ether_addr(hw->mac.port_addr))
15875 set_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps);
15877 i40e_ptp_alloc_pins(pf);
15878 pci_set_drvdata(pdev, pf);
15879 pci_save_state(pdev);
15881 #ifdef CONFIG_I40E_DCB
15882 status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
15884 lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED) ?
15885 (clear_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) :
15886 (set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags));
15887 dev_info(&pdev->dev,
15888 test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ?
15889 "FW LLDP is disabled\n" :
15890 "FW LLDP is enabled\n");
15892 /* Enable FW to write default DCB config on link-up */
15893 i40e_aq_set_dcb_parameters(hw, true, NULL);
15895 err = i40e_init_pf_dcb(pf);
15897 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
15898 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
15899 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
15900 /* Continue without DCB enabled */
15902 #endif /* CONFIG_I40E_DCB */
15904 /* set up periodic task facility */
15905 timer_setup(&pf->service_timer, i40e_service_timer, 0);
15906 pf->service_timer_period = HZ;
15908 INIT_WORK(&pf->service_task, i40e_service_task);
15909 clear_bit(__I40E_SERVICE_SCHED, pf->state);
15911 /* NVM bit on means WoL disabled for the port */
15912 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
15913 if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
15914 pf->wol_en = false;
15917 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
15919 /* set up the main switch operations */
15920 i40e_determine_queue_usage(pf);
15921 err = i40e_init_interrupt_scheme(pf);
15923 goto err_switch_setup;
15925 /* Reduce Tx and Rx queue pairs for kdump.
15926 * When MSI-X is enabled, no more TC queue pairs may be used than
15927 * MSI-X vectors (pf->num_lan_msix) are available. Thus
15928 * vsi->num_queue_pairs will be equal to pf->num_lan_msix, i.e., 1.
15930 if (is_kdump_kernel())
15931 pf->num_lan_msix = 1;
15933 pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
15934 pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
15935 pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
15936 pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
15937 pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
15938 pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
15939 UDP_TUNNEL_TYPE_GENEVE;
15941 /* The number of VSIs reported by the FW is the minimum guaranteed
15942 * to us; HW supports far more and we share the remaining pool with
15943 * the other PFs. We allocate space for more than the guarantee with
15944 * the understanding that we might not get them all later.
15946 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15947 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15949 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15950 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
15951 dev_warn(&pf->pdev->dev,
15952 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
15953 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
15954 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
15957 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
15958 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15962 goto err_switch_setup;
15965 #ifdef CONFIG_PCI_IOV
15966 /* prep for VF support */
15967 if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) &&
15968 test_bit(I40E_FLAG_MSIX_ENA, pf->flags) &&
15969 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15970 if (pci_num_vf(pdev))
15971 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
15974 err = i40e_setup_pf_switch(pf, false, false);
15976 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
15979 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
15981 /* if FDIR VSI was set up, start it now */
15982 for (i = 0; i < pf->num_alloc_vsi; i++) {
15983 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
15984 i40e_vsi_open(pf->vsi[i]);
15989 /* The driver only wants link up/down and module qualification
15990 * reports from firmware. Note the negative logic.
15992 err = i40e_aq_set_phy_int_mask(&pf->hw,
15993 ~(I40E_AQ_EVENT_LINK_UPDOWN |
15994 I40E_AQ_EVENT_MEDIA_NA |
15995 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
15997 dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
15999 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
16001 /* Reconfigure hardware for allowing smaller MSS in the case
16002 * of TSO, so that we avoid the MDD being fired and causing
16003 * a reset in the case of small MSS+TSO.
16005 val = rd32(hw, I40E_REG_MSS);
16006 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
16007 val &= ~I40E_REG_MSS_MIN_MASK;
16008 val |= I40E_64BYTE_MSS;
16009 wr32(hw, I40E_REG_MSS, val);
16012 if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) {
16014 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
16016 dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
16018 i40e_aq_str(&pf->hw,
16019 pf->hw.aq.asq_last_status));
16021 /* The main driver is (mostly) up and happy. We need to set this state
16022 * before setting up the misc vector or we get a race and the vector
16023 * ends up disabled forever.
16025 clear_bit(__I40E_DOWN, pf->state);
16027 /* In case of MSIX we are going to setup the misc vector right here
16028 * to handle admin queue events etc. In case of legacy and MSI
16029 * the misc functionality and queue processing is combined in
16030 * the same vector and that gets setup at open.
16032 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
16033 err = i40e_setup_misc_vector(pf);
16035 dev_info(&pdev->dev,
16036 "setup of misc vector failed: %d\n", err);
16037 i40e_cloud_filter_exit(pf);
16038 i40e_fdir_teardown(pf);
16043 #ifdef CONFIG_PCI_IOV
16044 /* prep for VF support */
16045 if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) &&
16046 test_bit(I40E_FLAG_MSIX_ENA, pf->flags) &&
16047 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
16048 /* disable link interrupts for VFs */
16049 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
16050 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
16051 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
16054 if (pci_num_vf(pdev)) {
16055 dev_info(&pdev->dev,
16056 "Active VFs found, allocating resources.\n");
16057 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
16059 dev_info(&pdev->dev,
16060 "Error %d allocating resources for existing VFs\n",
16064 #endif /* CONFIG_PCI_IOV */
16066 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
16067 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
16068 pf->num_iwarp_msix,
16069 I40E_IWARP_IRQ_PILE_ID);
16070 if (pf->iwarp_base_vector < 0) {
16071 dev_info(&pdev->dev,
16072 "failed to get tracking for %d vectors for IWARP err=%d\n",
16073 pf->num_iwarp_msix, pf->iwarp_base_vector);
16074 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
16078 i40e_dbg_pf_init(pf);
16080 /* tell the firmware that we're starting */
16081 i40e_send_version(pf);
16083 /* since everything's happy, start the service_task timer */
16084 mod_timer(&pf->service_timer,
16085 round_jiffies(jiffies + pf->service_timer_period));
16087 /* add this PF to client device list and launch a client service task */
16088 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
16089 err = i40e_lan_add_device(pf);
16091 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
16095 #define PCI_SPEED_SIZE 8
16096 #define PCI_WIDTH_SIZE 8
16097 /* Devices on the IOSF bus do not have this information
16098 * and will report PCI Gen 1 x 1 by default so don't bother checking them.
16101 if (!test_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, pf->hw.caps)) {
16102 char speed[PCI_SPEED_SIZE] = "Unknown";
16103 char width[PCI_WIDTH_SIZE] = "Unknown";
16105 /* Get the negotiated link width and speed from PCI config space
16108 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
16111 i40e_set_pci_config_data(hw, link_status);
16113 switch (hw->bus.speed) {
16114 case i40e_bus_speed_8000:
16115 strscpy(speed, "8.0", PCI_SPEED_SIZE); break;
16116 case i40e_bus_speed_5000:
16117 strscpy(speed, "5.0", PCI_SPEED_SIZE); break;
16118 case i40e_bus_speed_2500:
16119 strscpy(speed, "2.5", PCI_SPEED_SIZE); break;
16123 switch (hw->bus.width) {
16124 case i40e_bus_width_pcie_x8:
16125 strscpy(width, "8", PCI_WIDTH_SIZE); break;
16126 case i40e_bus_width_pcie_x4:
16127 strscpy(width, "4", PCI_WIDTH_SIZE); break;
16128 case i40e_bus_width_pcie_x2:
16129 strscpy(width, "2", PCI_WIDTH_SIZE); break;
16130 case i40e_bus_width_pcie_x1:
16131 strscpy(width, "1", PCI_WIDTH_SIZE); break;
16136 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
16139 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
16140 hw->bus.speed < i40e_bus_speed_8000) {
16141 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
16142 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
16146 /* get the requested speeds from the fw */
16147 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
16149 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n",
16151 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
16152 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
16154 /* set the FEC config due to the board capabilities */
16155 i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, pf->flags);
16157 /* get the supported phy types from the fw */
16158 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
16160 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n",
16162 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
16164 /* make sure the MFS hasn't been set lower than the default */
16165 #define MAX_FRAME_SIZE_DEFAULT 0x2600
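/* 0x2600 is 9728 bytes, the jumbo-frame-capable default MFS */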
16166 val = FIELD_GET(I40E_PRTGL_SAH_MFS_MASK,
16167 rd32(&pf->hw, I40E_PRTGL_SAH));
16168 if (val < MAX_FRAME_SIZE_DEFAULT)
16169 dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
16172 /* Add a filter to drop all Flow control frames from any VSI from being
16173 * transmitted. By doing so we stop a malicious VF from sending out
16174 * PAUSE or PFC frames and potentially controlling traffic for other PF/VF VSIs.
16176 * The FW can still send Flow control frames if enabled.
16178 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
16179 pf->main_vsi_seid);
16181 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
16182 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
16183 set_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps);
16184 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
16185 set_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps);
16186 /* print a string summarizing features */
16187 i40e_print_features(pf);
16189 i40e_devlink_register(pf);
16193 /* Unwind what we've done if something failed in the setup */
16195 set_bit(__I40E_DOWN, pf->state);
16196 i40e_clear_interrupt_scheme(pf);
16199 i40e_reset_interrupt_capability(pf);
16200 timer_shutdown_sync(&pf->service_timer);
16202 err_configure_lan_hmc:
16203 (void)i40e_shutdown_lan_hmc(hw);
16205 kfree(pf->qp_pile);
16209 iounmap(hw->hw_addr);
16213 pci_release_mem_regions(pdev);
16216 pci_disable_device(pdev);
16221 * i40e_remove - Device removal routine
16222 * @pdev: PCI device information struct
16224 * i40e_remove is called by the PCI subsystem to alert the driver
16225 * that it should release a PCI device. This could be caused by a
16226 * Hot-Plug event, or because the driver is going to be removed from
16229 static void i40e_remove(struct pci_dev *pdev)
16231 struct i40e_pf *pf = pci_get_drvdata(pdev);
16232 struct i40e_hw *hw = &pf->hw;
16236 i40e_devlink_unregister(pf);
16238 i40e_dbg_pf_exit(pf);
16242 /* Disable RSS in hw */
16243 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
16244 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
16246 /* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
16247 * flags; once they are set, i40e_rebuild should not be called as
16248 * i40e_prep_for_reset always returns early.
16250 while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
16251 usleep_range(1000, 2000);
16252 set_bit(__I40E_IN_REMOVE, pf->state);
16254 if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags)) {
16255 set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
16257 clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
16259 /* no more scheduling of any task */
16260 set_bit(__I40E_SUSPENDED, pf->state);
16261 set_bit(__I40E_DOWN, pf->state);
16262 if (pf->service_timer.function)
16263 timer_shutdown_sync(&pf->service_timer);
16264 if (pf->service_task.func)
16265 cancel_work_sync(&pf->service_task);
16267 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
16268 struct i40e_vsi *vsi = pf->vsi[0];
16270 /* We know that we have allocated only one vsi for this PF,
16271 * used only to register the netdevice so that the interface
16272 * is visible in the 'ifconfig' output.
16274 unregister_netdev(vsi->netdev);
16275 free_netdev(vsi->netdev);
16280 /* Client close must be called explicitly here because the timer
16281 * has been stopped.
16283 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16285 i40e_fdir_teardown(pf);
16287 /* If there is a switch structure or any orphans, remove them.
16288 * This will leave only the PF's VSI remaining.
16290 for (i = 0; i < I40E_MAX_VEB; i++) {
16294 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
16295 pf->veb[i]->uplink_seid == 0)
16296 i40e_switch_branch_release(pf->veb[i]);
16299 /* Now we can shutdown the PF's VSIs, just before we kill the adminq and hmc.
16302 for (i = pf->num_alloc_vsi; i--;)
16304 i40e_vsi_close(pf->vsi[i]);
16305 i40e_vsi_release(pf->vsi[i]);
16309 i40e_cloud_filter_exit(pf);
16311 /* remove attached clients */
16312 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
16313 ret_code = i40e_lan_del_device(pf);
16315 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
16319 /* shutdown and destroy the HMC */
16320 if (hw->hmc.hmc_obj) {
16321 ret_code = i40e_shutdown_lan_hmc(hw);
16323 dev_warn(&pdev->dev,
16324 "Failed to destroy the HMC resources: %d\n",
16329 /* Free MSI/legacy interrupt 0 when in recovery mode. */
16330 if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
16331 !test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
16332 free_irq(pf->pdev->irq, pf);
16334 /* shutdown the adminq */
16335 i40e_shutdown_adminq(hw);
16337 /* destroy the locks only once, here */
16338 mutex_destroy(&hw->aq.arq_mutex);
16339 mutex_destroy(&hw->aq.asq_mutex);
16341 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
16343 i40e_clear_interrupt_scheme(pf);
16344 for (i = 0; i < pf->num_alloc_vsi; i++) {
16346 if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
16347 i40e_vsi_clear_rings(pf->vsi[i]);
16348 i40e_vsi_clear(pf->vsi[i]);
16354 for (i = 0; i < I40E_MAX_VEB; i++) {
16359 kfree(pf->qp_pile);
16362 iounmap(hw->hw_addr);
16364 pci_release_mem_regions(pdev);
16366 pci_disable_device(pdev);
16370 * i40e_pci_error_detected - warning that something funky happened in PCI land
16371 * @pdev: PCI device information struct
16372 * @error: the type of PCI error
16374 * Called to warn that something happened and the error handling steps
16375 * are in progress. Allows the driver to quiesce things, be ready for remapping.
16378 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
16379 pci_channel_state_t error)
16381 struct i40e_pf *pf = pci_get_drvdata(pdev);
16383 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
16386 dev_info(&pdev->dev,
16387 "Cannot recover - error happened during device probe\n");
16388 return PCI_ERS_RESULT_DISCONNECT;
16391 /* shutdown all operations */
16392 if (!test_bit(__I40E_SUSPENDED, pf->state))
16393 i40e_prep_for_reset(pf);
16395 /* Request a slot reset */
16396 return PCI_ERS_RESULT_NEED_RESET;
16400 * i40e_pci_error_slot_reset - a PCI slot reset just happened
16401 * @pdev: PCI device information struct
16403 * Called to find if the driver can work with the device now that
16404 * the pci slot has been reset. If a basic connection seems good
16405 * (registers are readable and have sane content) then return a
16406 * happy little PCI_ERS_RESULT_xxx.
16408 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
16410 struct i40e_pf *pf = pci_get_drvdata(pdev);
16411 pci_ers_result_t result;
16414 dev_dbg(&pdev->dev, "%s\n", __func__);
16415 if (pci_enable_device_mem(pdev)) {
16416 dev_info(&pdev->dev,
16417 "Cannot re-enable PCI device after reset.\n");
16418 result = PCI_ERS_RESULT_DISCONNECT;
16420 pci_set_master(pdev);
16421 pci_restore_state(pdev);
16422 pci_save_state(pdev);
16423 pci_wake_from_d3(pdev, false);
16425 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
16427 result = PCI_ERS_RESULT_RECOVERED;
16429 result = PCI_ERS_RESULT_DISCONNECT;
16436 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
16437 * @pdev: PCI device information struct
16439 static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
16441 struct i40e_pf *pf = pci_get_drvdata(pdev);
16443 i40e_prep_for_reset(pf);
16447 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
16448 * @pdev: PCI device information struct
16450 static void i40e_pci_error_reset_done(struct pci_dev *pdev)
16452 struct i40e_pf *pf = pci_get_drvdata(pdev);
16454 if (test_bit(__I40E_IN_REMOVE, pf->state))
16457 i40e_reset_and_rebuild(pf, false, false);
16458 #ifdef CONFIG_PCI_IOV
16459 i40e_restore_all_vfs_msi_state(pdev);
16460 #endif /* CONFIG_PCI_IOV */
16464 * i40e_pci_error_resume - restart operations after PCI error recovery
16465 * @pdev: PCI device information struct
16467 * Called to allow the driver to bring things back up after PCI error
16468 * and/or reset recovery has finished.
16470 static void i40e_pci_error_resume(struct pci_dev *pdev)
16472 struct i40e_pf *pf = pci_get_drvdata(pdev);
16474 dev_dbg(&pdev->dev, "%s\n", __func__);
16475 if (test_bit(__I40E_SUSPENDED, pf->state))
16478 i40e_handle_reset_warning(pf, false);
16482 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
16483 * using the mac_address_write admin q function
16484 * @pf: pointer to i40e_pf struct
16486 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
16488 struct i40e_hw *hw = &pf->hw;
16493 /* Get current MAC address in case it's an LAA */
16494 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
16495 ether_addr_copy(mac_addr,
16496 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
16498 dev_err(&pf->pdev->dev,
16499 "Failed to retrieve MAC address; using default\n");
16500 ether_addr_copy(mac_addr, hw->mac.addr);
16503 /* The FW expects the mac address write cmd to first be called with
16504 * one of these flags before calling it again with the multicast enable flags.
16507 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
16509 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
16510 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
16512 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
16514 dev_err(&pf->pdev->dev,
16515 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
16519 flags = I40E_AQC_MC_MAG_EN
16520 | I40E_AQC_WOL_PRESERVE_ON_PFR
16521 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
16522 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
16524 dev_err(&pf->pdev->dev,
16525 "Failed to enable Multicast Magic Packet wake up\n");
16529 * i40e_shutdown - PCI callback for shutting down
16530 * @pdev: PCI device information struct
16532 static void i40e_shutdown(struct pci_dev *pdev)
16534 struct i40e_pf *pf = pci_get_drvdata(pdev);
16535 struct i40e_hw *hw = &pf->hw;
16537 set_bit(__I40E_SUSPENDED, pf->state);
16538 set_bit(__I40E_DOWN, pf->state);
16540 del_timer_sync(&pf->service_timer);
16541 cancel_work_sync(&pf->service_task);
16542 i40e_cloud_filter_exit(pf);
16543 i40e_fdir_teardown(pf);
16545 /* Client close must be called explicitly here because the timer
16546 * has been stopped.
16548 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16550 if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
16552 i40e_enable_mc_magic_wake(pf);
16554 i40e_prep_for_reset(pf);
16556 wr32(hw, I40E_PFPM_APM,
16557 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
16558 wr32(hw, I40E_PFPM_WUFC,
16559 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
16561 /* Free MSI/legacy interrupt 0 when in recovery mode. */
16562 if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
16563 !test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
16564 free_irq(pf->pdev->irq, pf);
16566 /* Since we're going to destroy queues during the
16567 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this flow.
16571 i40e_clear_interrupt_scheme(pf);
16574 if (system_state == SYSTEM_POWER_OFF) {
16575 pci_wake_from_d3(pdev, pf->wol_en);
16576 pci_set_power_state(pdev, PCI_D3hot);
16581 * i40e_suspend - PM callback for moving to D3
16582 * @dev: generic device information structure
16584 static int __maybe_unused i40e_suspend(struct device *dev)
16586 struct i40e_pf *pf = dev_get_drvdata(dev);
16587 struct i40e_hw *hw = &pf->hw;
16589 /* If we're already suspended, then there is nothing to do */
16590 if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
16593 set_bit(__I40E_DOWN, pf->state);
16595 /* Ensure service task will not be running */
16596 del_timer_sync(&pf->service_timer);
16597 cancel_work_sync(&pf->service_task);
16599 /* Client close must be called explicitly here because the timer
16600 * has been stopped.
16602 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16604 if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
16606 i40e_enable_mc_magic_wake(pf);
16608 /* Since we're going to destroy queues during the
16609 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this flow.
16614 i40e_prep_for_reset(pf);
16616 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
16617 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
16619 /* Clear the interrupt scheme and release our IRQs so that the system
16620 * can safely hibernate even when there are a large number of CPUs.
16621 * Otherwise hibernation might fail when mapping all the vectors back to CPU0.
16624 i40e_clear_interrupt_scheme(pf);
16632 * i40e_resume - PM callback for waking up from D3
16633 * @dev: generic device information structure
16635 static int __maybe_unused i40e_resume(struct device *dev)
16637 struct i40e_pf *pf = dev_get_drvdata(dev);
16640 /* If we're not suspended, then there is nothing to do */
16641 if (!test_bit(__I40E_SUSPENDED, pf->state))
16644 /* We need to hold the RTNL lock prior to restoring interrupt schemes,
16645 * since we're going to be restoring queues
16649 /* We cleared the interrupt scheme when we suspended, so we need to
16650 * restore it now to resume device functionality.
16652 err = i40e_restore_interrupt_scheme(pf);
16654 dev_err(dev, "Cannot restore interrupt scheme: %d\n",
16658 clear_bit(__I40E_DOWN, pf->state);
16659 i40e_reset_and_rebuild(pf, false, true);
16663 /* Clear suspended state last after everything is recovered */
16664 clear_bit(__I40E_SUSPENDED, pf->state);
16666 /* Restart the service task */
16667 mod_timer(&pf->service_timer,
16668 round_jiffies(jiffies + pf->service_timer_period));
16673 static const struct pci_error_handlers i40e_err_handler = {
16674 .error_detected = i40e_pci_error_detected,
16675 .slot_reset = i40e_pci_error_slot_reset,
16676 .reset_prepare = i40e_pci_error_reset_prepare,
16677 .reset_done = i40e_pci_error_reset_done,
16678 .resume = i40e_pci_error_resume,
16681 static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
16683 static struct pci_driver i40e_driver = {
16684 .name = i40e_driver_name,
16685 .id_table = i40e_pci_tbl,
16686 .probe = i40e_probe,
16687 .remove = i40e_remove,
16689 .pm = &i40e_pm_ops,
16691 .shutdown = i40e_shutdown,
16692 .err_handler = &i40e_err_handler,
16693 .sriov_configure = i40e_pci_sriov_configure,
16697 * i40e_init_module - Driver registration routine
16699 * i40e_init_module is the first routine called when the driver is
16700 * loaded. All it does is register with the PCI subsystem.
16702 static int __init i40e_init_module(void)
16706 pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
16707 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
16709 /* There is no need to throttle the number of active tasks because
16710 * each device limits its own task using a state bit for scheduling
16711 * the service task, and the device tasks do not interfere with each
16712 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
16713 * since we need to be able to guarantee forward progress even under memory pressure.
16716 i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
16718 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
16723 err = pci_register_driver(&i40e_driver);
16725 destroy_workqueue(i40e_wq);
16732 module_init(i40e_init_module);
16735 * i40e_exit_module - Driver exit cleanup routine
16737 * i40e_exit_module is called just before the driver is removed from memory.
16740 static void __exit i40e_exit_module(void)
16742 pci_unregister_driver(&i40e_driver);
16743 destroy_workqueue(i40e_wq);
16744 ida_destroy(&i40e_client_ida);
16747 module_exit(i40e_exit_module);