/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>
/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 1
#define DRV_VERSION_BUILD 14
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
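/* With the values above, DRV_VERSION expands to the string "2.1.14-k" */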
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;
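
/* i40e_wq is the driver's private workqueue for the service task (allocated
 * in i40e_init_module()); queuing service work here rather than on the
 * system workqueue keeps long-running reset work from stalling other
 * system-wide tasks.
 */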

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
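
/* Illustrative example: with needed = 3 and a pile whose list starts
 * [used][used][free][free][free][used]..., the search settles on base
 * index 2, stamps entries 2..4 with (id | I40E_PILE_VALID_BIT), returns 2,
 * and leaves search_hint at 5 for the next request.
 */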

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
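
/* i40e_put_lump() is the inverse of i40e_get_lump(): releasing the lump
 * from the example above with i40e_put_lump(pile, 2, id) clears entries
 * 2..4, returns 3, and pulls search_hint back to 2 so the freed space is
 * found again.
 */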

/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);
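
	/* Escalate through progressively heavier resets on repeated
	 * timeouts: PF reset first, then core reset, then global reset.
	 */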
	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes   = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes   += bytes;
}
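
/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair rereads
 * the counters if a writer updated them mid-read, yielding a consistent
 * 64-bit snapshot (notably on 32-bit kernels) without locking the Tx/Rx
 * hot path.
 */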

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: statistics structure to be filled out
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;
		i40e_get_netdev_stats_struct_tx(tx_ring, stats);

		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes   = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;

		if (i40e_enabled_xdp_vsi(vsi))
			i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
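
/* Worked example of the 48-bit roll-over handling above: with
 * *offset = 0xFFFFFFFFFFF0 and a post-wrap reading of new_data = 0x10,
 * the reported delta is (0x10 + BIT_ULL(48)) - 0xFFFFFFFFFFF0 = 0x20,
 * which the final mask keeps within 48 bits.
 */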

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);

	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.   This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
			&nsd->fd_sb_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
		       I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
		       I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ALL. We could check the table each
	 * time, incurring search cost repeatedly. However, we can notice two
	 * things:
	 *
	 * 1) the only place where we can gain a VLAN filter is in
	 *    i40e_add_filter.
	 *
	 * 2) the only place where filters are actually removed is in
	 *    i40e_sync_filters_subtask.
	 *
	 * Thus, we can simply use a boolean value, has_vlan_filters, which we
	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
	 * we have to perform the full search after deleting filters in
	 * i40e_sync_filters_subtask, but we already have to search
	 * filters here and can perform the check at the same time. This
	 * results in avoiding embedding a loop for VLAN mode inside another
	 * loop over all the filters, and should maintain correctness as noted
	 * above.
	 */
	return vsi->has_vlan_filter;
}

/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 */
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
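
/* Illustrative example: when the first VLAN 5 filter is queued for a VSI
 * whose MAC filter sits on VLAN=-1 (I40E_VLAN_ANY), case (b) above re-adds
 * that MAC on VLAN=0 so it matches only untagged frames, and moves the old
 * VLAN=-1 instance onto tmp_del_list.
 */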

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		/* If we're in overflow promisc mode, set the state directly
		 * to failed, so we don't bother to try sending the filter
		 * to the hardware.
		 */
		if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
			f->state = I40E_FILTER_FAILED;
		else
			f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place.
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}
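
/* Typical call pattern (sketch) -- callers take the hash lock themselves,
 * much as i40e_set_mac() below does:
 *
 *	spin_lock_bh(&vsi->mac_filter_hash_lock);
 *	f = i40e_add_filter(vsi, mac, I40E_VLAN_ANY);
 *	spin_unlock_bh(&vsi->mac_filter_hash_lock);
 */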

/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}

/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}

/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}

/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
	     "Missing mac_filter_hash_lock\n");
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	i40e_add_mac_filter(vsi, addr->sa_data);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	qcount = vsi->alloc_queue_pairs;

	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->alloc_rss_size,
					       num_tc_qps);
				break;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
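
/* Illustrative example: a MAIN VSI with enabled_tc = 0x1 and
 * alloc_queue_pairs = 6 (assuming pf->alloc_rss_size >= 6) yields
 * numtc = 1 and qcount = 6; the power-of-2 loop computes pow = 3, so TC0's
 * qmap advertises 8 queue slots at offset 0, while vsi->num_queue_pairs
 * stays 6 unless overridden by req_queue_pairs/num_lan_msix as above.
 */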

/**
 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (i40e_add_mac_filter(vsi, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	i40e_del_mac_filter(vsi, addr);

	return 0;
}

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	__dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
	__dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to VSI struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries need to be undone.
 *
 * MAC filter entries from this list were slated for deletion.
 **/
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;

	hlist_for_each_entry_safe(f, h, from, hlist) {
		u64 key = i40e_addr_to_hkey(f->macaddr);

		/* Move the element back into MAC filter list */
		hlist_del(&f->hlist);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);
	}
}

/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries need to be undone.
 *
 * MAC filter entries from this list were slated for addition.
 **/
static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;

	hlist_for_each_entry_safe(new, h, from, hlist) {
		/* We can simply free the wrapper structure */
		hlist_del(&new->hlist);
		kfree(new);
	}
}

/**
 * i40e_next_filter - Get the next non-broadcast filter from a list
 * @next: pointer to filter in list
 *
 * Returns the next non-broadcast filter in the list. Required so that we
 * ignore broadcast filters within the list, since these are not handled via
 * the normal firmware update path.
 */
static
struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
{
	hlist_for_each_entry_continue(next, hlist) {
		if (!is_broadcast_ether_addr(next->f->macaddr))
			return next;
	}

	return NULL;
}

/**
 * i40e_update_filter_state - Update filter state based on return data
 * from firmware
 * @count: Number of filters added
 * @add_list: return data from fw
 * @add_head: pointer to first filter in current batch
 *
 * MAC filter entries from list were slated to be added to device. Returns
 * number of successful filters. Note that 0 does NOT mean success!
 */
static int
i40e_update_filter_state(int count,
			 struct i40e_aqc_add_macvlan_element_data *add_list,
			 struct i40e_new_mac_filter *add_head)
{
	int retval = 0;
	int i;

	for (i = 0; i < count; i++) {
		/* Always check status of each filter. We don't need to check
		 * the firmware return status because we pre-set the filter
		 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
		 * request to the adminq. Thus, if it no longer matches then
		 * we know the filter is active.
		 */
		if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
			add_head->state = I40E_FILTER_FAILED;
		} else {
			add_head->state = I40E_FILTER_ACTIVE;
			retval++;
		}

		add_head = i40e_next_filter(add_head);
		if (!add_head)
			break;
	}

	return retval;
}
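
/* Illustrative example: if firmware accepts 3 of 4 filters in a batch, the
 * rejected element still carries I40E_AQC_MM_ERR_NO_RES, its filter is
 * marked I40E_FILTER_FAILED, and the function returns 3.
 */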

/**
 * i40e_aqc_del_filters - Request firmware to delete a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @num_del: the number of filters to delete
 * @retval: Set to -EIO on failure to delete
 *
 * Send a request to firmware via AdminQ to delete a set of filters. Uses
 * *retval instead of a return value so that success does not force ret_val to
 * be set to 0. This ensures that a sequence of calls to this function
 * preserves the previous value of *retval on successful delete.
 */
static
void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_remove_macvlan_element_data *list,
			  int num_del, int *retval)
{
	struct i40e_hw *hw = &vsi->back->hw;
	i40e_status aq_ret;
	int aq_err;

	aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
	aq_err = hw->aq.asq_last_status;

	/* Explicitly ignore and do not report when firmware returns ENOENT */
	if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
		*retval = -EIO;
		dev_info(&vsi->back->pdev->dev,
			 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
			 vsi_name, i40e_stat_str(hw, aq_ret),
			 i40e_aq_str(hw, aq_err));
	}
}

/**
 * i40e_aqc_add_filters - Request firmware to add a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @add_head: Position in the add hlist
 * @num_add: the number of filters to add
 * @promisc_changed: set to true on exit if promiscuous mode was forced on
 *
 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
 * promisc_changed to true if the firmware has run out of space for more
 * filters.
 */
static
void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_add_macvlan_element_data *list,
			  struct i40e_new_mac_filter *add_head,
			  int num_add, bool *promisc_changed)
{
	struct i40e_hw *hw = &vsi->back->hw;
	int aq_err, fcnt;

	i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
	aq_err = hw->aq.asq_last_status;
	fcnt = i40e_update_filter_state(num_add, list, add_head);

	if (fcnt != num_add) {
		*promisc_changed = true;
		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
			 i40e_aq_str(hw, aq_err),
			 vsi_name);
	}
}
1956 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
1957 * @vsi: pointer to the VSI
1960 * This function sets or clears the promiscuous broadcast flags for VLAN
1961 * filters in order to properly receive broadcast frames. Assumes that only
1962 * broadcast filters are passed.
 * Returns status indicating success or failure.
1967 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
1968 struct i40e_mac_filter *f)
1970 bool enable = f->state == I40E_FILTER_NEW;
1971 struct i40e_hw *hw = &vsi->back->hw;
1974 if (f->vlan == I40E_VLAN_ANY) {
1975 aq_ret = i40e_aq_set_vsi_broadcast(hw,
1980 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
1988 dev_warn(&vsi->back->pdev->dev,
1989 "Error %s setting broadcast promiscuous mode on %s\n",
1990 i40e_aq_str(hw, hw->aq.asq_last_status),
1997 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1998 * @vsi: ptr to the VSI
2000 * Push any outstanding VSI filter changes through the AdminQ.
2002 * Returns 0 or error value
2004 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2006 struct hlist_head tmp_add_list, tmp_del_list;
2007 struct i40e_mac_filter *f;
2008 struct i40e_new_mac_filter *new, *add_head = NULL;
2009 struct i40e_hw *hw = &vsi->back->hw;
2010 unsigned int failed_filters = 0;
2011 unsigned int vlan_filters = 0;
2012 bool promisc_changed = false;
2013 char vsi_name[16] = "PF";
2014 int filter_list_len = 0;
2015 i40e_status aq_ret = 0;
2016 u32 changed_flags = 0;
2017 struct hlist_node *h;
	/* buffers for the AdminQ add/del filter lists, allocated below */
2027 struct i40e_aqc_add_macvlan_element_data *add_list;
2028 struct i40e_aqc_remove_macvlan_element_data *del_list;
2030 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2031 usleep_range(1000, 2000);
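	/* Only one filter sync may run on a VSI at a time; anyone else who
	 * reaches this point sleeps in short intervals until the current
	 * owner clears __I40E_VSI_SYNCING_FILTERS below.
	 */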
2035 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2036 vsi->current_netdev_flags = vsi->netdev->flags;
2039 INIT_HLIST_HEAD(&tmp_add_list);
2040 INIT_HLIST_HEAD(&tmp_del_list);
2042 if (vsi->type == I40E_VSI_SRIOV)
2043 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2044 else if (vsi->type != I40E_VSI_MAIN)
2045 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2047 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2048 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2050 spin_lock_bh(&vsi->mac_filter_hash_lock);
2051 /* Create a list of filters to delete. */
2052 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2053 if (f->state == I40E_FILTER_REMOVE) {
2054 /* Move the element into temporary del_list */
2055 hash_del(&f->hlist);
2056 hlist_add_head(&f->hlist, &tmp_del_list);
2058 /* Avoid counting removed filters */
2061 if (f->state == I40E_FILTER_NEW) {
2062 /* Create a temporary i40e_new_mac_filter */
2063 new = kzalloc(sizeof(*new), GFP_ATOMIC);
				if (!new)
					goto err_no_memory_locked;
2067 /* Store pointer to the real filter */
2069 new->state = f->state;
2071 /* Add it to the hash list */
2072 hlist_add_head(&new->hlist, &tmp_add_list);
2075 /* Count the number of active (current and new) VLAN
2076 * filters we have now. Does not count filters which
2077 * are marked for deletion.
2083 retval = i40e_correct_mac_vlan_filters(vsi,
		if (retval)
			goto err_no_memory_locked;
2090 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2093 /* Now process 'del_list' outside the lock */
2094 if (!hlist_empty(&tmp_del_list)) {
2095 filter_list_len = hw->aq.asq_buf_size /
2096 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2097 list_size = filter_list_len *
2098 sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kzalloc(list_size, GFP_ATOMIC);
		if (!del_list)
			goto err_no_memory;
2103 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag instead of deleting the filter,
			 * then release the list entry
			 */
2109 if (is_broadcast_ether_addr(f->macaddr)) {
2110 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2112 hlist_del(&f->hlist);
2117 /* add to delete list */
2118 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2119 if (f->vlan == I40E_VLAN_ANY) {
2120 del_list[num_del].vlan_tag = 0;
2121 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2123 del_list[num_del].vlan_tag =
2124 cpu_to_le16((u16)(f->vlan));
2127 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2128 del_list[num_del].flags = cmd_flags;
2131 /* flush a full buffer */
2132 if (num_del == filter_list_len) {
2133 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2135 memset(del_list, 0, list_size);
2138 /* Release memory for MAC filter entries which were
2139 * synced up with HW.
2141 hlist_del(&f->hlist);
2146 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2154 if (!hlist_empty(&tmp_add_list)) {
2155 /* Do all the adds now. */
2156 filter_list_len = hw->aq.asq_buf_size /
2157 sizeof(struct i40e_aqc_add_macvlan_element_data);
2158 list_size = filter_list_len *
2159 sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kzalloc(list_size, GFP_ATOMIC);
		if (!add_list)
			goto err_no_memory;
2165 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2166 if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
2168 new->state = I40E_FILTER_FAILED;
2172 /* handle broadcast filters by updating the broadcast
2173 * promiscuous flag instead of adding a MAC filter.
2175 if (is_broadcast_ether_addr(new->f->macaddr)) {
2176 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2178 new->state = I40E_FILTER_FAILED;
2180 new->state = I40E_FILTER_ACTIVE;
2184 /* add to add array */
2188 ether_addr_copy(add_list[num_add].mac_addr,
2190 if (new->f->vlan == I40E_VLAN_ANY) {
2191 add_list[num_add].vlan_tag = 0;
2192 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2194 add_list[num_add].vlan_tag =
2195 cpu_to_le16((u16)(new->f->vlan));
2197 add_list[num_add].queue_number = 0;
2198 /* set invalid match method for later detection */
2199 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2200 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2201 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2204 /* flush a full buffer */
2205 if (num_add == filter_list_len) {
2206 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2209 memset(add_list, 0, list_size);
2214 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2215 num_add, &promisc_changed);
		/* Now move all of the filters from the temp add list back to
		 * the VSI's list.
		 */
2220 spin_lock_bh(&vsi->mac_filter_hash_lock);
2221 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2222 /* Only update the state if we're still NEW */
2223 if (new->f->state == I40E_FILTER_NEW)
2224 new->f->state = new->state;
2225 hlist_del(&new->hlist);
2228 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2233 /* Determine the number of active and failed filters. */
2234 spin_lock_bh(&vsi->mac_filter_hash_lock);
2235 vsi->active_filters = 0;
2236 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2237 if (f->state == I40E_FILTER_ACTIVE)
2238 vsi->active_filters++;
2239 else if (f->state == I40E_FILTER_FAILED)
2242 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2244 /* If promiscuous mode has changed, we need to calculate a new
2245 * threshold for when we are safe to exit
2247 if (promisc_changed)
2248 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
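	/* For example, with 40 active filters the threshold becomes 30:
	 * we may leave overflow promiscuous mode only once the active
	 * count drops below 30 and no filters remain in the failed state.
	 */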
2250 /* Check if we are able to exit overflow promiscuous mode. We can
2251 * safely exit if we didn't just enter, we no longer have any failed
2252 * filters, and we have reduced filters below the threshold value.
2254 if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
2255 !promisc_changed && !failed_filters &&
2256 (vsi->active_filters < vsi->promisc_threshold)) {
2257 dev_info(&pf->pdev->dev,
2258 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2260 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2261 promisc_changed = true;
2262 vsi->promisc_threshold = 0;
2265 /* if the VF is not trusted do not do promisc */
2266 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2267 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2271 /* check for changes in promiscuous modes */
2272 if (changed_flags & IFF_ALLMULTI) {
2273 bool cur_multipromisc;
2275 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2276 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2281 retval = i40e_aq_rc_to_posix(aq_ret,
2282 hw->aq.asq_last_status);
2283 dev_info(&pf->pdev->dev,
2284 "set multi promisc failed on %s, err %s aq_err %s\n",
2286 i40e_stat_str(hw, aq_ret),
2287 i40e_aq_str(hw, hw->aq.asq_last_status));
2291 if ((changed_flags & IFF_PROMISC) || promisc_changed) {
2294 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2295 test_bit(__I40E_VSI_OVERFLOW_PROMISC,
2297 if ((vsi->type == I40E_VSI_MAIN) &&
2298 (pf->lan_veb != I40E_NO_VEB) &&
2299 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2300 /* set defport ON for Main VSI instead of true promisc
2301 * this way we will get all unicast/multicast and VLAN
2302 * promisc behavior but will not get VF or VMDq traffic
2303 * replicated on the Main VSI.
2305 if (pf->cur_promisc != cur_promisc) {
2306 pf->cur_promisc = cur_promisc;
2309 i40e_aq_set_default_vsi(hw,
2314 i40e_aq_clear_default_vsi(hw,
2318 retval = i40e_aq_rc_to_posix(aq_ret,
2319 hw->aq.asq_last_status);
2320 dev_info(&pf->pdev->dev,
2321 "Set default VSI failed on %s, err %s, aq_err %s\n",
2323 i40e_stat_str(hw, aq_ret),
2325 hw->aq.asq_last_status));
2329 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2336 i40e_aq_rc_to_posix(aq_ret,
2337 hw->aq.asq_last_status);
2338 dev_info(&pf->pdev->dev,
2339 "set unicast promisc failed on %s, err %s, aq_err %s\n",
2341 i40e_stat_str(hw, aq_ret),
2343 hw->aq.asq_last_status));
2345 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2351 i40e_aq_rc_to_posix(aq_ret,
2352 hw->aq.asq_last_status);
2353 dev_info(&pf->pdev->dev,
2354 "set multicast promisc failed on %s, err %s, aq_err %s\n",
2356 i40e_stat_str(hw, aq_ret),
2358 hw->aq.asq_last_status));
2361 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
2365 retval = i40e_aq_rc_to_posix(aq_ret,
2366 pf->hw.aq.asq_last_status);
2367 dev_info(&pf->pdev->dev,
2368 "set brdcast promisc failed, err %s, aq_err %s\n",
2369 i40e_stat_str(hw, aq_ret),
2371 hw->aq.asq_last_status));
2375 /* if something went wrong then set the changed flag so we try again */
2377 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return retval;
err_no_memory:
	/* Restore elements on the temporary add and delete lists */
2384 spin_lock_bh(&vsi->mac_filter_hash_lock);
2385 err_no_memory_locked:
2386 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2387 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2388 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2390 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return -ENOMEM;
}
2396 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2397 * @pf: board private structure
2399 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2403 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
2405 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
2407 for (v = 0; v < pf->num_alloc_vsi; v++) {
2409 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2410 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2413 /* come back and try again later */
2414 pf->flags |= I40E_FLAG_FILTER_SYNC;
2422 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2425 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2427 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2428 return I40E_RXBUFFER_2048;
2430 return I40E_RXBUFFER_3072;
2434 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2435 * @netdev: network interface device structure
2436 * @new_mtu: new value for maximum frame size
2438 * Returns 0 on success, negative on failure
2440 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2442 struct i40e_netdev_priv *np = netdev_priv(netdev);
2443 struct i40e_vsi *vsi = np->vsi;
2444 struct i40e_pf *pf = vsi->back;
2446 if (i40e_enabled_xdp_vsi(vsi)) {
2447 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
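		/* Worst-case frame is the MTU plus the Ethernet header, FCS,
		 * and one VLAN tag; XDP needs the whole frame to fit into a
		 * single Rx buffer, so larger MTUs are rejected.
		 */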
		if (frame_size > i40e_max_xdp_frame_size(vsi))
			return -EINVAL;
2453 netdev_info(netdev, "changing MTU from %d to %d\n",
2454 netdev->mtu, new_mtu);
2455 netdev->mtu = new_mtu;
2456 if (netif_running(netdev))
2457 i40e_vsi_reinit_locked(vsi);
2458 pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
2459 I40E_FLAG_CLIENT_L2_CHANGE);
2464 * i40e_ioctl - Access the hwtstamp interface
2465 * @netdev: network interface device structure
2466 * @ifr: interface request data
2467 * @cmd: ioctl command
2469 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2471 struct i40e_netdev_priv *np = netdev_priv(netdev);
2472 struct i40e_pf *pf = np->vsi->back;
	switch (cmd) {
	case SIOCGHWTSTAMP:
		return i40e_ptp_get_ts_config(pf, ifr);
	case SIOCSHWTSTAMP:
		return i40e_ptp_set_ts_config(pf, ifr);
	default:
		return -EOPNOTSUPP;
	}
2485 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2486 * @vsi: the vsi being adjusted
2488 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2490 struct i40e_vsi_context ctxt;
2493 if ((vsi->info.valid_sections &
2494 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2495 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2496 return; /* already enabled */
2498 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2499 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2500 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2502 ctxt.seid = vsi->seid;
2503 ctxt.info = vsi->info;
2504 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2506 dev_info(&vsi->back->pdev->dev,
2507 "update vlan stripping failed, err %s aq_err %s\n",
2508 i40e_stat_str(&vsi->back->hw, ret),
2509 i40e_aq_str(&vsi->back->hw,
2510 vsi->back->hw.aq.asq_last_status));
2515 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2516 * @vsi: the vsi being adjusted
2518 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2520 struct i40e_vsi_context ctxt;
2523 if ((vsi->info.valid_sections &
2524 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2525 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2526 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2527 return; /* already disabled */
2529 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2530 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2531 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2533 ctxt.seid = vsi->seid;
2534 ctxt.info = vsi->info;
2535 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2537 dev_info(&vsi->back->pdev->dev,
2538 "update vlan stripping failed, err %s aq_err %s\n",
2539 i40e_stat_str(&vsi->back->hw, ret),
2540 i40e_aq_str(&vsi->back->hw,
2541 vsi->back->hw.aq.asq_last_status));
2546 * i40e_vlan_rx_register - Setup or shutdown vlan offload
2547 * @netdev: network interface to be adjusted
2548 * @features: netdev features to test if VLAN offload is enabled or not
2550 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2552 struct i40e_netdev_priv *np = netdev_priv(netdev);
2553 struct i40e_vsi *vsi = np->vsi;
2555 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2556 i40e_vlan_stripping_enable(vsi);
2558 i40e_vlan_stripping_disable(vsi);
2562 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2563 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2566 * This is a helper function for adding a new MAC/VLAN filter with the
2567 * specified VLAN for each existing MAC address already in the hash table.
 * This function does *not* perform any accounting to update filters based on
 * VLAN mode.
2571 * NOTE: this function expects to be called while under the
2572 * mac_filter_hash_lock
2574 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2576 struct i40e_mac_filter *f, *add_f;
2577 struct hlist_node *h;
2580 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2581 if (f->state == I40E_FILTER_REMOVE)
2583 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2585 dev_info(&vsi->back->pdev->dev,
2586 "Could not add vlan filter %d for %pM\n",
2596 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2597 * @vsi: the VSI being configured
2598 * @vid: VLAN id to be added
2600 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2607 /* The network stack will attempt to add VID=0, with the intention to
2608 * receive priority tagged packets with a VLAN of 0. Our HW receives
2609 * these packets by default when configured to receive untagged
2610 * packets, so we don't need to add a filter for this case.
2611 * Additionally, HW interprets adding a VID=0 filter as meaning to
2612 * receive *only* tagged traffic and stops receiving untagged traffic.
2613 * Thus, we do not want to actually add a filter for VID=0
	/* Locked once because all functions invoked below iterate the list */
2619 spin_lock_bh(&vsi->mac_filter_hash_lock);
2620 err = i40e_add_vlan_all_mac(vsi, vid);
2621 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2625 /* schedule our worker thread which will take care of
2626 * applying the new filter changes
2628 i40e_service_event_schedule(vsi->back);
2633 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2634 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2637 * This function should be used to remove all VLAN filters which match the
2638 * given VID. It does not schedule the service event and does not take the
2639 * mac_filter_hash_lock so it may be combined with other operations under
2640 * a single invocation of the mac_filter_hash_lock.
2642 * NOTE: this function expects to be called while under the
2643 * mac_filter_hash_lock
2645 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2647 struct i40e_mac_filter *f;
2648 struct hlist_node *h;
2651 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2653 __i40e_del_filter(vsi, f);
2658 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2659 * @vsi: the VSI being configured
2660 * @vid: VLAN id to be removed
2662 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
	if (!vid || vsi->info.pvid)
		return;
2667 spin_lock_bh(&vsi->mac_filter_hash_lock);
2668 i40e_rm_vlan_all_mac(vsi, vid);
2669 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2671 /* schedule our worker thread which will take care of
2672 * applying the new filter changes
2674 i40e_service_event_schedule(vsi->back);
2678 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2679 * @netdev: network interface to be adjusted
2680 * @vid: vlan id to be added
2682 * net_device_ops implementation for adding vlan ids
2684 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2685 __always_unused __be16 proto, u16 vid)
2687 struct i40e_netdev_priv *np = netdev_priv(netdev);
2688 struct i40e_vsi *vsi = np->vsi;
	if (vid >= VLAN_N_VID)
		return -EINVAL;
2694 ret = i40e_vsi_add_vlan(vsi, vid);
	if (!ret)
		set_bit(vid, vsi->active_vlans);
2702 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2703 * @netdev: network interface to be adjusted
2704 * @vid: vlan id to be removed
2706 * net_device_ops implementation for removing vlan ids
2708 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2709 __always_unused __be16 proto, u16 vid)
2711 struct i40e_netdev_priv *np = netdev_priv(netdev);
2712 struct i40e_vsi *vsi = np->vsi;
	/* The return code is ignored as there is nothing a user can do
	 * about a failure to remove, and a log message has already been
	 * printed by the function doing the removal
	 */
2718 i40e_vsi_kill_vlan(vsi, vid);
2720 clear_bit(vid, vsi->active_vlans);
2726 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2727 * @vsi: the vsi being brought back up
2729 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2736 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2738 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2739 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2744 * i40e_vsi_add_pvid - Add pvid for the VSI
2745 * @vsi: the vsi being adjusted
2746 * @vid: the vlan id to set as a PVID
2748 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2750 struct i40e_vsi_context ctxt;
2753 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2754 vsi->info.pvid = cpu_to_le16(vid);
2755 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2756 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2757 I40E_AQ_VSI_PVLAN_EMOD_STR;
2759 ctxt.seid = vsi->seid;
2760 ctxt.info = vsi->info;
2761 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2763 dev_info(&vsi->back->pdev->dev,
2764 "add pvid failed, err %s aq_err %s\n",
2765 i40e_stat_str(&vsi->back->hw, ret),
2766 i40e_aq_str(&vsi->back->hw,
2767 vsi->back->hw.aq.asq_last_status));
2775 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2776 * @vsi: the vsi being adjusted
2778 * Just use the vlan_rx_register() service to put it back to normal
2780 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2782 i40e_vlan_stripping_disable(vsi);
2788 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2789 * @vsi: ptr to the VSI
2791 * If this function returns with an error, then it's possible one or
2792 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
2795 * Return 0 on success, negative on failure
2797 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2801 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2802 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2804 if (!i40e_enabled_xdp_vsi(vsi))
2807 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2808 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
2814 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2815 * @vsi: ptr to the VSI
2817 * Free VSI's transmit software resources
2819 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2823 if (vsi->tx_rings) {
2824 for (i = 0; i < vsi->num_queue_pairs; i++)
2825 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2826 i40e_free_tx_resources(vsi->tx_rings[i]);
2829 if (vsi->xdp_rings) {
2830 for (i = 0; i < vsi->num_queue_pairs; i++)
2831 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2832 i40e_free_tx_resources(vsi->xdp_rings[i]);
2837 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2838 * @vsi: ptr to the VSI
2840 * If this function returns with an error, then it's possible one or
2841 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
2844 * Return 0 on success, negative on failure
2846 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2850 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2851 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2856 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2857 * @vsi: ptr to the VSI
2859 * Free all receive software resources
2861 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2868 for (i = 0; i < vsi->num_queue_pairs; i++)
2869 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2870 i40e_free_rx_resources(vsi->rx_rings[i]);
2874 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2875 * @ring: The Tx ring to configure
2877 * This enables/disables XPS for a given Tx descriptor ring
2878 * based on the TCs enabled for the VSI that ring belongs to.
2880 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2882 struct i40e_vsi *vsi = ring->vsi;
2885 if (!ring->q_vector || !ring->netdev)
2888 if ((vsi->tc_config.numtc <= 1) &&
2889 !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state)) {
2890 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
2891 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
 * i40e_configure_tx_ring - Configure a transmit ring context
2903 * @ring: The Tx ring to configure
2905 * Configure the Tx descriptor ring in the HMC context.
2907 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2909 struct i40e_vsi *vsi = ring->vsi;
2910 u16 pf_q = vsi->base_queue + ring->queue_index;
2911 struct i40e_hw *hw = &vsi->back->hw;
2912 struct i40e_hmc_obj_txq tx_ctx;
2913 i40e_status err = 0;
2916 /* some ATR related tx ring init */
2917 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2918 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2919 ring->atr_count = 0;
2921 ring->atr_sample_rate = 0;
2925 i40e_config_xps_tx_ring(ring);
2927 /* clear the context structure first */
2928 memset(&tx_ctx, 0, sizeof(tx_ctx));
2930 tx_ctx.new_context = 1;
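	/* The HMC context stores the descriptor ring base address in
	 * 128-byte units, hence the divide below.
	 */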
2931 tx_ctx.base = (ring->dma / 128);
2932 tx_ctx.qlen = ring->count;
2933 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2934 I40E_FLAG_FD_ATR_ENABLED));
2935 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2936 /* FDIR VSI tx ring can still use RS bit and writebacks */
2937 if (vsi->type != I40E_VSI_FDIR)
2938 tx_ctx.head_wb_ena = 1;
2939 tx_ctx.head_wb_addr = ring->dma +
2940 (ring->count * sizeof(struct i40e_tx_desc));
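	/* The head writeback area sits immediately after the last
	 * descriptor; the ring allocation reserves room there (a u32)
	 * so no separate DMA buffer is needed.
	 */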
2942 /* As part of VSI creation/update, FW allocates certain
2943 * Tx arbitration queue sets for each TC enabled for
2944 * the VSI. The FW returns the handles to these queue
2945 * sets as part of the response buffer to Add VSI,
2946 * Update VSI, etc. AQ commands. It is expected that
2947 * these queue set handles be associated with the Tx
2948 * queues by the driver as part of the TX queue context
2949 * initialization. This has to be done regardless of
2950 * DCB as by default everything is mapped to TC0.
2952 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2953 tx_ctx.rdylist_act = 0;
2955 /* clear the context in the HMC */
2956 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2958 dev_info(&vsi->back->pdev->dev,
2959 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2960 ring->queue_index, pf_q, err);
2964 /* set the context in the HMC */
2965 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2967 dev_info(&vsi->back->pdev->dev,
2968 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2969 ring->queue_index, pf_q, err);
2973 /* Now associate this queue with this PCI function */
2974 if (vsi->type == I40E_VSI_VMDQ2) {
2975 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2976 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2977 I40E_QTX_CTL_VFVM_INDX_MASK;
2979 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2982 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2983 I40E_QTX_CTL_PF_INDX_MASK);
2984 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2987 /* cache tail off for easier writes later */
2988 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2994 * i40e_configure_rx_ring - Configure a receive ring context
2995 * @ring: The Rx ring to configure
2997 * Configure the Rx descriptor ring in the HMC context.
2999 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3001 struct i40e_vsi *vsi = ring->vsi;
3002 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3003 u16 pf_q = vsi->base_queue + ring->queue_index;
3004 struct i40e_hw *hw = &vsi->back->hw;
3005 struct i40e_hmc_obj_rxq rx_ctx;
3006 i40e_status err = 0;
3008 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3010 /* clear the context structure first */
3011 memset(&rx_ctx, 0, sizeof(rx_ctx));
3013 ring->rx_buf_len = vsi->rx_buf_len;
3015 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3016 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3018 rx_ctx.base = (ring->dma / 128);
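	/* Like dbuff above (given in 128-byte units via
	 * I40E_RXQ_CTX_DBUFF_SHIFT == 7), the Rx ring base address is
	 * programmed in 128-byte units.
	 */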
3019 rx_ctx.qlen = ring->count;
	/* use 32 byte descriptors */
	rx_ctx.dsize = 1;

	/* descriptor type is always zero
	 * rx_ctx.dtype = 0;
	 */
	rx_ctx.hsplit_0 = 0;
3029 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3030 if (hw->revision_id == 0)
3031 rx_ctx.lrxqthresh = 0;
3033 rx_ctx.lrxqthresh = 2;
3034 rx_ctx.crcstrip = 1;
	/* this controls whether VLAN is stripped from inner headers */
	rx_ctx.showiv = 0;
	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;
3041 /* clear the context in the HMC */
3042 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3044 dev_info(&vsi->back->pdev->dev,
3045 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3046 ring->queue_index, pf_q, err);
3050 /* set the context in the HMC */
3051 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3053 dev_info(&vsi->back->pdev->dev,
3054 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3055 ring->queue_index, pf_q, err);
3059 /* configure Rx buffer alignment */
3060 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3061 clear_ring_build_skb_enabled(ring);
3063 set_ring_build_skb_enabled(ring);
3065 /* cache tail for quicker writes, and clear the reg before use */
3066 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3067 writel(0, ring->tail);
3069 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3075 * i40e_vsi_configure_tx - Configure the VSI for Tx
3076 * @vsi: VSI structure describing this set of rings and resources
3078 * Configure the Tx VSI for operation.
3080 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3085 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3086 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3088 if (!i40e_enabled_xdp_vsi(vsi))
3091 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3092 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3098 * i40e_vsi_configure_rx - Configure the VSI for Rx
3099 * @vsi: the VSI being configured
3101 * Configure the Rx VSI for operation.
3103 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3108 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3109 vsi->max_frame = I40E_MAX_RXBUFFER;
3110 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3111 #if (PAGE_SIZE < 8192)
3112 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3113 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3114 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3115 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
#endif
	} else {
		vsi->max_frame = I40E_MAX_RXBUFFER;
		vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
						       I40E_RXBUFFER_2048;
	}
3123 /* set up individual rings */
3124 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3125 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3131 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3132 * @vsi: ptr to the VSI
3134 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3136 struct i40e_ring *tx_ring, *rx_ring;
3137 u16 qoffset, qcount;
3140 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3141 /* Reset the TC information */
3142 for (i = 0; i < vsi->num_queue_pairs; i++) {
3143 rx_ring = vsi->rx_rings[i];
3144 tx_ring = vsi->tx_rings[i];
3145 rx_ring->dcb_tc = 0;
3146 tx_ring->dcb_tc = 0;
3150 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3151 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3154 qoffset = vsi->tc_config.tc_info[n].qoffset;
3155 qcount = vsi->tc_config.tc_info[n].qcount;
3156 for (i = qoffset; i < (qoffset + qcount); i++) {
3157 rx_ring = vsi->rx_rings[i];
3158 tx_ring = vsi->tx_rings[i];
3159 rx_ring->dcb_tc = n;
3160 tx_ring->dcb_tc = n;
3166 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3167 * @vsi: ptr to the VSI
3169 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3172 i40e_set_rx_mode(vsi->netdev);
3176 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3177 * @vsi: Pointer to the targeted VSI
3179 * This function replays the hlist on the hw where all the SB Flow Director
3180 * filters were saved.
3182 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3184 struct i40e_fdir_filter *filter;
3185 struct i40e_pf *pf = vsi->back;
3186 struct hlist_node *node;
3188 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3191 /* Reset FDir counters as we're replaying all existing filters */
3192 pf->fd_tcp4_filter_cnt = 0;
3193 pf->fd_udp4_filter_cnt = 0;
3194 pf->fd_sctp4_filter_cnt = 0;
3195 pf->fd_ip4_filter_cnt = 0;
3197 hlist_for_each_entry_safe(filter, node,
3198 &pf->fdir_filter_list, fdir_node) {
3199 i40e_add_del_fdir(vsi, filter, true);
3204 * i40e_vsi_configure - Set up the VSI for action
3205 * @vsi: the VSI being configured
3207 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3211 i40e_set_vsi_rx_mode(vsi);
3212 i40e_restore_vlan(vsi);
3213 i40e_vsi_config_dcb_rings(vsi);
3214 err = i40e_vsi_configure_tx(vsi);
3216 err = i40e_vsi_configure_rx(vsi);
3222 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3223 * @vsi: the VSI being configured
3225 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3227 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3228 struct i40e_pf *pf = vsi->back;
3229 struct i40e_hw *hw = &pf->hw;
3234 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3235 * and PFINT_LNKLSTn registers, e.g.:
3236 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3238 qp = vsi->base_queue;
3239 vector = vsi->base_vector;
3240 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3241 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3243 q_vector->itr_countdown = ITR_COUNTDOWN_START;
3244 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
3245 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3246 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3248 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
3249 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3250 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3252 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3253 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3255 /* Linked list for the queuepairs assigned to this vector */
3256 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3257 for (q = 0; q < q_vector->num_ringpairs; q++) {
3258 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
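			/* XDP Tx queues occupy the block of queues directly
			 * after the regular Tx queues, so the "next queue"
			 * for Rx is offset by alloc_queue_pairs when XDP is on.
			 */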
3261 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3262 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3263 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3264 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3265 (I40E_QUEUE_TYPE_TX <<
3266 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3268 wr32(hw, I40E_QINT_RQCTL(qp), val);
3271 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3272 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3273 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3274 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3275 (I40E_QUEUE_TYPE_TX <<
3276 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3278 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3281 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3282 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3283 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3284 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3285 (I40E_QUEUE_TYPE_RX <<
3286 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3288 /* Terminate the linked list */
3289 if (q == (q_vector->num_ringpairs - 1))
3290 val |= (I40E_QUEUE_END_OF_LIST <<
3291 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3293 wr32(hw, I40E_QINT_TQCTL(qp), val);
3302 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3303 * @hw: ptr to the hardware info
3305 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3307 struct i40e_hw *hw = &pf->hw;
3310 /* clear things first */
3311 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3312 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3314 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3315 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3316 I40E_PFINT_ICR0_ENA_GRST_MASK |
3317 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3318 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3319 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3320 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3321 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3323 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3324 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3326 if (pf->flags & I40E_FLAG_PTP)
3327 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3329 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3331 /* SW_ITR_IDX = 0, but don't change INTENA */
3332 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3333 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3335 /* OTHER_ITR_IDX = 0 */
3336 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
 * i40e_configure_msi_and_legacy - MSI and legacy mode interrupt config in the HW
3341 * @vsi: the VSI being configured
3343 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3345 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
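	/* In MSI/legacy mode only queue pair 0 is serviced; its XDP Tx
	 * ring, when enabled, lives alloc_queue_pairs entries beyond the
	 * regular rings, which is what nextqp points at.
	 */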
3346 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3347 struct i40e_pf *pf = vsi->back;
3348 struct i40e_hw *hw = &pf->hw;
3351 /* set the ITR configuration */
3352 q_vector->itr_countdown = ITR_COUNTDOWN_START;
3353 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
3354 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3355 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
3356 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
3357 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3358 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
3360 i40e_enable_misc_int_causes(pf);
3362 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3363 wr32(hw, I40E_PFINT_LNKLST0, 0);
3365 /* Associate the queue pair to the vector and enable the queue int */
3366 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3367 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3368 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
	      (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3371 wr32(hw, I40E_QINT_RQCTL(0), val);
3373 if (i40e_enabled_xdp_vsi(vsi)) {
3374 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3375 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3377 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3379 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3382 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3383 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3384 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3386 wr32(hw, I40E_QINT_TQCTL(0), val);
3391 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3392 * @pf: board private structure
3394 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3396 struct i40e_hw *hw = &pf->hw;
3398 wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3404 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3405 * @pf: board private structure
3406 * @clearpba: true when all pending interrupt events should be cleared
3408 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
3410 struct i40e_hw *hw = &pf->hw;
3413 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3414 (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
3415 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3417 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3422 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3423 * @irq: interrupt number
3424 * @data: pointer to a q_vector
3426 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3428 struct i40e_q_vector *q_vector = data;
3430 if (!q_vector->tx.ring && !q_vector->rx.ring)
3433 napi_schedule_irqoff(&q_vector->napi);
3439 * i40e_irq_affinity_notify - Callback for affinity changes
3440 * @notify: context as to what irq was changed
3441 * @mask: the new affinity mask
3443 * This is a callback function used by the irq_set_affinity_notifier function
3444 * so that we may register to receive changes to the irq affinity masks.
3446 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3447 const cpumask_t *mask)
3449 struct i40e_q_vector *q_vector =
3450 container_of(notify, struct i40e_q_vector, affinity_notify);
3452 cpumask_copy(&q_vector->affinity_mask, mask);
3456 * i40e_irq_affinity_release - Callback for affinity notifier release
3457 * @ref: internal core kernel usage
3459 * This is a callback function used by the irq_set_affinity_notifier function
3460 * to inform the current notification subscriber that they will no longer
3461 * receive notifications.
3463 static void i40e_irq_affinity_release(struct kref *ref) {}
3466 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3467 * @vsi: the VSI being configured
3468 * @basename: name for the vector
3470 * Allocates MSI-X vectors and requests interrupts from the kernel.
3472 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3474 int q_vectors = vsi->num_q_vectors;
3475 struct i40e_pf *pf = vsi->back;
3476 int base = vsi->base_vector;
3483 for (vector = 0; vector < q_vectors; vector++) {
3484 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3486 irq_num = pf->msix_entries[base + vector].vector;
3488 if (q_vector->tx.ring && q_vector->rx.ring) {
3489 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3490 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3492 } else if (q_vector->rx.ring) {
3493 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3494 "%s-%s-%d", basename, "rx", rx_int_idx++);
3495 } else if (q_vector->tx.ring) {
3496 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3497 "%s-%s-%d", basename, "tx", tx_int_idx++);
3499 /* skip this unused q_vector */
3502 err = request_irq(irq_num,
3508 dev_info(&pf->pdev->dev,
3509 "MSIX request_irq failed, error: %d\n", err);
3510 goto free_queue_irqs;
3513 /* register for affinity change notifications */
3514 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3515 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3516 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3517 /* Spread affinity hints out across online CPUs.
3519 * get_cpu_mask returns a static constant mask with
3520 * a permanent lifetime so it's ok to pass to
3521 * irq_set_affinity_hint without making a copy.
3523 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3524 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3527 vsi->irqs_ready = true;
3533 irq_num = pf->msix_entries[base + vector].vector;
3534 irq_set_affinity_notifier(irq_num, NULL);
3535 irq_set_affinity_hint(irq_num, NULL);
3536 free_irq(irq_num, &vsi->q_vectors[vector]);
3542 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3543 * @vsi: the VSI being un-configured
3545 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3547 struct i40e_pf *pf = vsi->back;
3548 struct i40e_hw *hw = &pf->hw;
3549 int base = vsi->base_vector;
3552 /* disable interrupt causation from each queue */
3553 for (i = 0; i < vsi->num_queue_pairs; i++) {
3556 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3557 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3558 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3560 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3561 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3562 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3564 if (!i40e_enabled_xdp_vsi(vsi))
3566 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3569 /* disable each interrupt */
3570 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3571 for (i = vsi->base_vector;
3572 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3573 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3576 for (i = 0; i < vsi->num_q_vectors; i++)
3577 synchronize_irq(pf->msix_entries[i + base].vector);
3579 /* Legacy and MSI mode - this stops all interrupt handling */
3580 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3581 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3583 synchronize_irq(pf->pdev->irq);
3588 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3589 * @vsi: the VSI being configured
3591 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3593 struct i40e_pf *pf = vsi->back;
3596 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3597 for (i = 0; i < vsi->num_q_vectors; i++)
3598 i40e_irq_dynamic_enable(vsi, i);
3600 i40e_irq_dynamic_enable_icr0(pf, true);
3603 i40e_flush(&pf->hw);
3608 * i40e_free_misc_vector - Free the vector that handles non-queue events
3609 * @pf: board private structure
3611 static void i40e_free_misc_vector(struct i40e_pf *pf)
3614 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3615 i40e_flush(&pf->hw);
3617 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3618 synchronize_irq(pf->msix_entries[0].vector);
3619 free_irq(pf->msix_entries[0].vector, pf);
3620 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3625 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3626 * @irq: interrupt number
3627 * @data: pointer to a q_vector
3629 * This is the handler used for all MSI/Legacy interrupts, and deals
3630 * with both queue and non-queue interrupts. This is also used in
3631 * MSIX mode to handle the non-queue interrupts.
3633 static irqreturn_t i40e_intr(int irq, void *data)
3635 struct i40e_pf *pf = (struct i40e_pf *)data;
3636 struct i40e_hw *hw = &pf->hw;
3637 irqreturn_t ret = IRQ_NONE;
3638 u32 icr0, icr0_remaining;
3641 icr0 = rd32(hw, I40E_PFINT_ICR0);
3642 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3644 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;
3648 /* if interrupt but no bits showing, must be SWINT */
3649 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;
3653 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3654 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3655 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3656 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
3657 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3660 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3661 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3662 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3663 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3665 /* We do not have a way to disarm Queue causes while leaving
3666 * interrupt enabled for all other causes, ideally
3667 * interrupt should be disabled while we are in NAPI but
3668 * this is not a performance path and napi_schedule()
3669 * can deal with rescheduling.
3671 if (!test_bit(__I40E_DOWN, pf->state))
3672 napi_schedule_irqoff(&q_vector->napi);
3675 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3676 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3677 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
3678 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3681 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3682 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3683 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
3686 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3687 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3688 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3691 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3692 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
3693 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
3694 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3695 val = rd32(hw, I40E_GLGEN_RSTAT);
3696 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3697 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
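		/* Decode which reset source fired: core (CORER), global
		 * (GLOBR) or firmware/EMP (EMPR); each is counted separately
		 * and EMPR additionally flags the EMP reset state bit.
		 */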
		if (val == I40E_RESET_CORER) {
			pf->corer_count++;
		} else if (val == I40E_RESET_GLOBR) {
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
		}
3708 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3709 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3710 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3711 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3712 rd32(hw, I40E_PFHMC_ERRORINFO),
3713 rd32(hw, I40E_PFHMC_ERRORDATA));
3716 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3717 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3719 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3720 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3721 i40e_ptp_tx_hwtstamp(pf);
3725 /* If a critical error is pending we have no choice but to reset the
3727 * Report and mask out any remaining unexpected interrupts.
3729 icr0_remaining = icr0 & ena_mask;
3730 if (icr0_remaining) {
3731 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3733 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3734 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3735 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3736 dev_info(&pf->pdev->dev, "device will be reset\n");
3737 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
3738 i40e_service_event_schedule(pf);
3740 ena_mask &= ~icr0_remaining;
enable_intr:
	/* re-enable interrupt causes */
3746 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3747 if (!test_bit(__I40E_DOWN, pf->state)) {
3748 i40e_service_event_schedule(pf);
3749 i40e_irq_dynamic_enable_icr0(pf, false);
3756 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3757 * @tx_ring: tx ring to clean
3758 * @budget: how many cleans we're allowed
 * Returns true if there's any budget left (i.e. the clean is finished)
3762 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3764 struct i40e_vsi *vsi = tx_ring->vsi;
3765 u16 i = tx_ring->next_to_clean;
3766 struct i40e_tx_buffer *tx_buf;
3767 struct i40e_tx_desc *tx_desc;
3769 tx_buf = &tx_ring->tx_bi[i];
3770 tx_desc = I40E_TX_DESC(tx_ring, i);
3771 i -= tx_ring->count;
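	/* Bias i by -count so the wrap checks below reduce to a simple
	 * test for zero rather than a compare against the ring size.
	 */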
3774 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3776 /* if next_to_watch is not set then there is no work pending */
3780 /* prevent any other reads prior to eop_desc */
3781 read_barrier_depends();
3783 /* if the descriptor isn't done, no work yet to do */
3784 if (!(eop_desc->cmd_type_offset_bsz &
3785 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3788 /* clear next_to_watch to prevent false hangs */
3789 tx_buf->next_to_watch = NULL;
3791 tx_desc->buffer_addr = 0;
3792 tx_desc->cmd_type_offset_bsz = 0;
3793 /* move past filter desc */
3798 i -= tx_ring->count;
3799 tx_buf = tx_ring->tx_bi;
3800 tx_desc = I40E_TX_DESC(tx_ring, 0);
3802 /* unmap skb header data */
3803 dma_unmap_single(tx_ring->dev,
3804 dma_unmap_addr(tx_buf, dma),
3805 dma_unmap_len(tx_buf, len),
3807 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3808 kfree(tx_buf->raw_buf);
3810 tx_buf->raw_buf = NULL;
3811 tx_buf->tx_flags = 0;
3812 tx_buf->next_to_watch = NULL;
3813 dma_unmap_len_set(tx_buf, len, 0);
3814 tx_desc->buffer_addr = 0;
3815 tx_desc->cmd_type_offset_bsz = 0;
3817 /* move us past the eop_desc for start of next FD desc */
3822 i -= tx_ring->count;
3823 tx_buf = tx_ring->tx_bi;
3824 tx_desc = I40E_TX_DESC(tx_ring, 0);
3827 /* update budget accounting */
3829 } while (likely(budget));
3831 i += tx_ring->count;
3832 tx_ring->next_to_clean = i;
3834 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
3835 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
3841 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3842 * @irq: interrupt number
3843 * @data: pointer to a q_vector
3845 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3847 struct i40e_q_vector *q_vector = data;
3848 struct i40e_vsi *vsi;
3850 if (!q_vector->tx.ring)
3853 vsi = q_vector->tx.ring->vsi;
3854 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3860 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
3861 * @vsi: the VSI being configured
3862 * @v_idx: vector index
3863 * @qp_idx: queue pair index
3865 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3867 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3868 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3869 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3871 tx_ring->q_vector = q_vector;
3872 tx_ring->next = q_vector->tx.ring;
3873 q_vector->tx.ring = tx_ring;
3874 q_vector->tx.count++;
3876 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
3877 if (i40e_enabled_xdp_vsi(vsi)) {
3878 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
3880 xdp_ring->q_vector = q_vector;
3881 xdp_ring->next = q_vector->tx.ring;
3882 q_vector->tx.ring = xdp_ring;
3883 q_vector->tx.count++;
3886 rx_ring->q_vector = q_vector;
3887 rx_ring->next = q_vector->rx.ring;
3888 q_vector->rx.ring = rx_ring;
3889 q_vector->rx.count++;
3893 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3894 * @vsi: the VSI being configured
3896 * This function maps descriptor rings to the queue-specific vectors
3897 * we were allotted through the MSI-X enabling code. Ideally, we'd have
3898 * one vector per queue pair, but on a constrained vector budget, we
3899 * group the queue pairs as "efficiently" as possible.
3901 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3903 int qp_remaining = vsi->num_queue_pairs;
3904 int q_vectors = vsi->num_q_vectors;
3909 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3910 * group them so there are multiple queues per vector.
3911 * It is also important to go through all the vectors available to be
3912 * sure that if we don't use all the vectors, that the remaining vectors
3913 * are cleared. This is especially important when decreasing the
3914 * number of queues in use.
3916 for (; v_start < q_vectors; v_start++) {
3917 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3919 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
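		/* For example, 10 queue pairs spread over 4 vectors come out
		 * as 3, 3, 2 and 2 ring pairs as qp_remaining shrinks.
		 */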
3921 q_vector->num_ringpairs = num_ringpairs;
3923 q_vector->rx.count = 0;
3924 q_vector->tx.count = 0;
3925 q_vector->rx.ring = NULL;
3926 q_vector->tx.ring = NULL;
3928 while (num_ringpairs--) {
3929 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
3937 * i40e_vsi_request_irq - Request IRQ from the OS
3938 * @vsi: the VSI being configured
3939 * @basename: name for the vector
3941 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3943 struct i40e_pf *pf = vsi->back;
3946 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3947 err = i40e_vsi_request_irq_msix(vsi, basename);
3948 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3949 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3952 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3956 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3961 #ifdef CONFIG_NET_POLL_CONTROLLER
3963 * i40e_netpoll - A Polling 'interrupt' handler
3964 * @netdev: network interface device structure
3966 * This is used by netconsole to send skbs without having to re-enable
3967 * interrupts. It's not called while the normal interrupt routine is executing.
3969 static void i40e_netpoll(struct net_device *netdev)
3971 struct i40e_netdev_priv *np = netdev_priv(netdev);
3972 struct i40e_vsi *vsi = np->vsi;
3973 struct i40e_pf *pf = vsi->back;
3976 /* if interface is down do nothing */
3977 if (test_bit(__I40E_VSI_DOWN, vsi->state))
3980 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3981 for (i = 0; i < vsi->num_q_vectors; i++)
3982 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3984 i40e_intr(pf->pdev->irq, netdev);
3989 #define I40E_QTX_ENA_WAIT_COUNT 50
3992 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3993 * @pf: the PF being configured
3994 * @pf_q: the PF queue
3995 * @enable: enable or disable state of the queue
3997 * This routine will wait for the given Tx queue of the PF to reach the
3998 * enabled or disabled state.
3999 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4000 * multiple retries; else will return 0 in case of success.
4002 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4007 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4008 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4009 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4012 usleep_range(10, 20);
4014 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4021 * i40e_control_tx_q - Start or stop a particular Tx queue
4022 * @pf: the PF structure
4023 * @pf_q: the PF queue to configure
4024 * @enable: start or stop the queue
4026 * This function enables or disables a single queue. Note that any delay
4027 * required after the operation is expected to be handled by the caller of
4030 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4032 struct i40e_hw *hw = &pf->hw;
4036 /* warn the TX unit of coming changes */
4037 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4039 usleep_range(10, 20);
4041 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4042 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4043 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4044 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4046 usleep_range(1000, 2000);
4049 /* Skip if the queue is already in the requested state */
4050 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4053 /* turn on/off the queue */
	if (enable) {
		wr32(hw, I40E_QTX_HEAD(pf_q), 0);
		tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
	} else {
		tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	}
4061 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4065 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
 * @seid: seid of the VSI being configured
 * @pf: the PF structure
4068 * @pf_q: the PF queue to configure
4069 * @is_xdp: true if the queue is used for XDP
4070 * @enable: start or stop the queue
4072 static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4073 bool is_xdp, bool enable)
4077 i40e_control_tx_q(pf, pf_q, enable);
4079 /* wait for the change to finish */
4080 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4082 dev_info(&pf->pdev->dev,
4083 "VSI seid %d %sTx ring %d %sable timeout\n",
4084 seid, (is_xdp ? "XDP " : ""), pf_q,
4085 (enable ? "en" : "dis"));
/**
 * i40e_vsi_control_tx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret = 0;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     pf_q,
					     false /*is xdp*/, enable);
		if (ret)
			break;

		if (!i40e_enabled_xdp_vsi(vsi))
			continue;

		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     pf_q + vsi->alloc_queue_pairs,
					     true /*is xdp*/, enable);
		if (ret)
			break;
	}

	return ret;
}

/**
 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 **/
static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 rx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}

/**
 * i40e_control_rx_q - Start or stop a particular Rx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * This function enables or disables a single queue. Note that any delay
 * required after the operation is expected to be handled by the caller of
 * this function.
 **/
static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	u32 rx_reg;
	int i;

	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
		rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
		if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
		    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
			break;
		usleep_range(1000, 2000);
	}

	/* Skip if the queue is already in the requested state */
	if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
		return;

	/* turn on/off the queue */
	if (enable)
		rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
	else
		rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;

	wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
}

/**
 * i40e_vsi_control_rx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret = 0;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		i40e_control_rx_q(pf, pf_q, enable);

		/* wait for the change to finish */
		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	/* Due to HW errata, on Rx disable only, the register can indicate done
	 * before it really is. Needs 50ms to be sure
	 */
	if (!enable)
		mdelay(50);

	return ret;
}

/**
 * i40e_vsi_start_rings - Start a VSI's rings
 * @vsi: the VSI being configured
 **/
int i40e_vsi_start_rings(struct i40e_vsi *vsi)
{
	int ret = 0;

	/* do rx first for enable and last for disable */
	ret = i40e_vsi_control_rx(vsi, true);
	if (ret)
		return ret;
	ret = i40e_vsi_control_tx(vsi, true);

	return ret;
}

/**
 * i40e_vsi_stop_rings - Stop a VSI's rings
 * @vsi: the VSI being configured
 **/
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
	/* When port TX is suspended, don't wait */
	if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
		return i40e_vsi_stop_rings_no_wait(vsi);

	/* do rx first for enable and last for disable
	 * Ignore return value, we need to shutdown whatever we can
	 */
	i40e_vsi_control_tx(vsi, false);
	i40e_vsi_control_rx(vsi, false);
}

/**
 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
 * @vsi: the VSI being shutdown
 *
 * This function stops all the rings for a VSI but does not delay to verify
 * that rings have been disabled. It is expected that the caller is shutting
 * down multiple VSIs at once and will delay together for all the VSIs after
 * initiating the shutdown. This is particularly useful for shutting down lots
 * of VFs together. Otherwise, a large delay can be incurred while configuring
 * each VSI in serial.
 **/
void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		i40e_control_tx_q(pf, pf_q, false);
		i40e_control_rx_q(pf, pf_q, false);
	}
}

/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		if (!vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			int irq_num;
			u16 vector;

			vector = i + base;
			irq_num = pf->msix_entries[vector].vector;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity notifier in the IRQ descriptor */
			irq_set_affinity_notifier(irq_num, NULL);
			/* remove our suggested affinity mask for this IRQ */
			irq_set_affinity_hint(irq_num, NULL);
			synchronize_irq(irq_num);
			free_irq(irq_num, vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx. To clear the
			 * link list, stick the EOL value into the
			 * next_q field of the registers.
			 */
			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			val |= I40E_QUEUE_END_OF_LIST
				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);

			while (qp != I40E_QUEUE_END_OF_LIST) {
				u32 next;

				val = rd32(hw, I40E_QINT_RQCTL(qp));

				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_RQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_RQCTL(qp), val);

				val = rd32(hw, I40E_QINT_TQCTL(qp));

				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;

				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_TQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_TQCTL(qp), val);
				qp = next;
			}
		}
	} else {
		free_irq(pf->pdev->irq, pf);

		val = rd32(hw, I40E_PFINT_LNKLST0);
		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
		val |= I40E_QUEUE_END_OF_LIST
			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
		wr32(hw, I40E_PFINT_LNKLST0, val);

		val = rd32(hw, I40E_QINT_RQCTL(qp));
		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_RQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_RQCTL(qp), val);

		val = rd32(hw, I40E_QINT_TQCTL(qp));

		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_TQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}

/**
 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *ring;

	if (!q_vector)
		return;

	/* disassociate q_vector from rings */
	i40e_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;

	i40e_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI w/ an associated netdev is set up w/ NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	vsi->q_vectors[v_idx] = NULL;
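
	/* Defer the free with kfree_rcu() so that any path still holding
	 * an RCU-protected reference to the q_vector sees valid memory
	 * until a grace period has elapsed.
	 */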
	kfree_rcu(q_vector, rcu);
}

/**
 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI being un-configured
 *
 * This frees the memory allocated to the q_vectors and
 * deletes references to the NAPI struct.
 **/
static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
{
	int v_idx;

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
		i40e_free_q_vector(vsi, v_idx);
}

/**
 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
 * @pf: board private structure
 **/
static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
{
	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		pci_disable_msix(pf->pdev);
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		kfree(pf->irq_pile);
		pf->irq_pile = NULL;
	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
		pci_disable_msi(pf->pdev);
	}
	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
}

/**
 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @pf: board private structure
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
	int i;

	i40e_free_misc_vector(pf);

	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
		      I40E_IWARP_IRQ_PILE_ID);

	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT - 1);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	i40e_reset_interrupt_capability(pf);
}

/**
 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/
static void i40e_napi_enable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_enable(&q_vector->napi);
	}
}

/**
 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/
static void i40e_napi_disable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_disable(&q_vector->napi);
	}
}

/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 **/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
		i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);
	vsi->current_netdev_flags = 0;
	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		pf->flags |= I40E_FLAG_CLIENT_RESET;
}

/**
 * i40e_quiesce_vsi - Pause a given VSI
 * @vsi: the VSI being paused
 **/
static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
{
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
	else
		i40e_vsi_close(vsi);
}

/**
 * i40e_unquiesce_vsi - Resume a given VSI
 * @vsi: the VSI being resumed
 **/
static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
{
	if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
		return;

	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
	else
		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
}

/**
 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_quiesce_vsi(pf->vsi[v]);
	}
}

/**
 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_unquiesce_vsi(pf->vsi[v]);
	}
}

/**
 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
 * @vsi: the VSI being configured
 *
 * Wait until all queues on a given VSI have been disabled.
 **/
int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* Check and wait for the Tx queue */
		ret = i40e_pf_txq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}

		if (!i40e_enabled_xdp_vsi(vsi))
			goto wait_rx;

		/* Check and wait for the XDP Tx queue */
		ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
				       false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d XDP Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
wait_rx:
		/* Check and wait for the Rx queue */
		ret = i40e_pf_rxq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
	}

	return 0;
}

#ifdef CONFIG_I40E_DCB
/**
 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
 * @pf: the PF
 *
 * This function waits for the queues to be in disabled state for all the
 * VSIs that are managed by this PF.
 **/
static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
	int v, ret = 0;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v]) {
			ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
			if (ret)
				break;
		}
	}

	return ret;
}

#endif

/**
 * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
 * @q_idx: TX queue number
 * @vsi: Pointer to VSI struct
 *
 * This function checks the specified queue for the given VSI and detects a
 * hung condition. We proactively detect hung TX queues by checking if
 * interrupts are disabled but there are pending descriptors. If it appears
 * hung, attempt to recover by triggering a SW interrupt.
 **/
static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct i40e_pf *pf;
	u32 val, tx_pending;
	int i;

	pf = vsi->back;

	/* now that we have an index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (q_idx == vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (!tx_ring)
		return;

	/* Read interrupt register */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		val = rd32(&pf->hw,
			   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
					       tx_ring->vsi->base_vector - 1));
	else
		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

	tx_pending = i40e_get_tx_pending(tx_ring);

	/* Interrupts are disabled and TX pending is non-zero: trigger the SW
	 * interrupt (don't wait). Worst case there is one extra interrupt
	 * that finds nothing to clean because the queues were already clean.
	 */
	if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
		i40e_force_wb(vsi, tx_ring->q_vector);
}

/**
 * i40e_detect_recover_hung - Function to detect and recover hung_queues
 * @pf: pointer to PF struct
 *
 * LAN VSI has netdev and netdev has TX queues. This function checks each of
 * those TX queues; if a queue is hung, it triggers recovery by issuing a
 * SW interrupt.
 **/
static void i40e_detect_recover_hung(struct i40e_pf *pf)
{
	struct net_device *netdev;
	struct i40e_vsi *vsi;
	int i;

	/* Only for LAN VSI */
	vsi = pf->vsi[pf->lan_vsi];

	if (!vsi)
		return;

	/* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
		return;

	/* Make sure type is MAIN VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	/* Bail out if netif_carrier is not OK */
	if (!netif_carrier_ok(netdev))
		return;

	/* Go thru' TX queues for netdev */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;

		q = netdev_get_tx_queue(netdev, i);
		if (q)
			i40e_detect_recover_hung_queue(i, vsi);
	}
}

/**
 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
 * @pf: pointer to PF
 *
 * Get TC map for ISCSI PF type that will include iSCSI TC
 * and LAN TC.
 **/
static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
{
	struct i40e_dcb_app_priority_table app;
	struct i40e_hw *hw = &pf->hw;
	u8 enabled_tc = 1; /* TC0 is always enabled */
	u8 tc, i;
	/* Get the iSCSI APP TLV */
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	for (i = 0; i < dcbcfg->numapps; i++) {
		app = dcbcfg->app[i];
		if (app.selector == I40E_APP_SEL_TCPIP &&
		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
			tc = dcbcfg->etscfg.prioritytable[app.priority];
			enabled_tc |= BIT(tc);
			break;
		}
	}

	return enabled_tc;
}

/**
 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Return the number of TCs from given DCBx configuration
 **/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
	int i, tc_unused = 0;
	u8 num_tc = 0;
	u8 ret = 0;

	/* Scan the ETS Config Priority Table to find
	 * traffic class enabled for a given priority
	 * and create a bitmask of enabled TCs
	 */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
		num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
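
	/* e.g. a priority table of {0, 0, 1, 2, 0, 0, 0, 0} sets bits 0-2
	 * of num_tc (0x7), which the scan below counts as three contiguous
	 * TCs.
	 */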
	/* Now scan the bitmask to check for
	 * contiguous TCs starting with TC0
	 */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (num_tc & BIT(i)) {
			if (!tc_unused) {
				ret++;
			} else {
				pr_err("Non-contiguous TC - Disabling DCB\n");
				return 1;
			}
		} else {
			tc_unused = 1;
		}
	}

	/* There is always at least TC0 */
	if (!ret)
		ret = 1;

	return ret;
}

/**
 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Query the current DCB configuration and return the bitmap of
 * traffic classes enabled from the given DCBX config
 **/
static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
{
	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
	u8 enabled_tc = 1;
	u8 i;

	for (i = 0; i < num_tc; i++)
		enabled_tc |= BIT(i);

	return enabled_tc;
}

/**
 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
 * @pf: PF being queried
 *
 * Return number of traffic classes enabled for the given PF
 **/
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 i, enabled_tc = 1;
	u8 num_tc = 0;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	/* If DCB is not enabled then always in single TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return 1;

	/* SFP mode will be enabled for all TCs on port */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_num_tc(dcbcfg);

	/* MFP mode return count of enabled TCs for this PF */
	if (pf->hw.func_caps.iscsi)
		enabled_tc = i40e_get_iscsi_tc_map(pf);
	else
		return 1; /* Only TC0 */

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			num_tc++;
	}
	return num_tc;
}

/**
 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap for enabled traffic classes for this PF.
 **/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
	/* If DCB is not enabled for this PF then just return default TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return I40E_DEFAULT_TRAFFIC_CLASS;

	/* SFP mode we want PF to be enabled for all TCs */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);

	/* MFP enabled and iSCSI PF type */
	if (pf->hw.func_caps.iscsi)
		return i40e_get_iscsi_tc_map(pf);
	else
		return I40E_DEFAULT_TRAFFIC_CLASS;
}

/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
					le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i * 4)) & 0x7);
	}

	return 0;
}

/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
				       u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status ret;
	int i;

	bw_data.tc_valid_bits = enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
				       NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "AQ command Config VSI BW allocation per TC failed = %d\n",
			 vsi->back->hw.aq.asq_last_status);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}

/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (!netdev)
		return;

	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x00001001; the driver
		 * will set the numtc for netdev as 2 that will be
		 * referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & BIT(i))
			netdev_set_tc_queue(netdev,
					vsi->tc_config.tc_info[i].netdev_tc,
					vsi->tc_config.tc_info[i].qcount,
					vsi->tc_config.tc_info[i].qoffset);
	}

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}

/**
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}

/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
		ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Update vsi tc config failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}

/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VEB bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the BW information */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return ret;
}

#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller would've quiesced all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = I40E_DEFAULT_TRAFFIC_CLASS;

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}

/**
 * i40e_resume_port_tx - Resume port Tx
 * @pf: PF struct
 *
 * Resume a port's Tx and issue a PF reset in case of failure to
 * resume.
 **/
static int i40e_resume_port_tx(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_aq_resume_port_tx(hw, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Resume Port Tx failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	}

	return ret;
}

/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
	if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT)
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");
		} else {
			/* When status is not DISABLED then DCBX in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC
			 * or explicitly disable if only one TC
			 */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_DCB_ENABLED;
			dev_dbg(&pf->pdev->dev,
				"DCBX offload is supported for this PF.\n");
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "Query for DCB configuration failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return err;
}
#endif /* CONFIG_I40E_DCB */

#define SPEED_SIZE 14

/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 * @isup: true if the link is up, false otherwise
 **/
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
	enum i40e_aq_link_speed new_speed;
	struct i40e_pf *pf = vsi->back;
	char *speed = "Unknown";
	char *fc = "Unknown";
	char *fec = "";
	char *req_fec = "";
	char *an = "";

	new_speed = pf->hw.phy.link_info.link_speed;

	if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
		return;
	vsi->current_isup = isup;
	vsi->current_speed = new_speed;
	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	/* Warn user if link speed on NPAR enabled partition is not at
	 * least 10GB
	 */
	if (pf->hw.func_caps.npar_enable &&
	    (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
	     pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
		netdev_warn(vsi->netdev,
			    "The partition detected link speed that is less than 10Gbps\n");

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case I40E_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case I40E_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case I40E_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case I40E_LINK_SPEED_1GB:
		speed = "1000 M";
		break;
	case I40E_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		break;
	}

	switch (pf->hw.fc.current_mode) {
	case I40E_FC_FULL:
		fc = "RX/TX";
		break;
	case I40E_FC_TX_PAUSE:
		fc = "TX";
		break;
	case I40E_FC_RX_PAUSE:
		fc = "RX";
		break;
	default:
		fc = "None";
		break;
	}

	if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
		req_fec = ", Requested FEC: None";
		fec = ", FEC: None";
		an = ", Autoneg: False";

		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
			an = ", Autoneg: True";

		if (pf->hw.phy.link_info.fec_info &
		    I40E_AQ_CONFIG_FEC_KR_ENA)
			fec = ", FEC: CL74 FC-FEC/BASE-R";
		else if (pf->hw.phy.link_info.fec_info &
			 I40E_AQ_CONFIG_FEC_RS_ENA)
			fec = ", FEC: CL108 RS-FEC";

		/* 'CL108 RS-FEC' should be displayed when RS is requested, or
		 * both RS and FC are requested
		 */
		if (vsi->back->hw.phy.link_info.req_fec_info &
		    (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
			if (vsi->back->hw.phy.link_info.req_fec_info &
			    I40E_AQ_REQUEST_FEC_RS)
				req_fec = ", Requested FEC: CL108 RS-FEC";
			else
				req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
		}
	}

	netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
		    speed, req_fec, fec, an, fc);
}

/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_start_rings(vsi);
	if (err)
		return err;

	clear_bit(__I40E_VSI_DOWN, vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
		pf->fd_add_err = 0;
		pf->fd_atr_cnt = 0;
		i40e_fdir_filter_restore(vsi);
	}

	/* On the next run of the service_task, notify any clients of the new
	 * opened netdev
	 */
	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
	i40e_service_event_schedule(pf);

	return 0;
}

/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	WARN_ON(in_interrupt());
	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, pf->state);
}

/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err;

	err = i40e_vsi_configure(vsi);
	if (!err)
		err = i40e_up_complete(vsi);

	return err;
}

/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 **/
void i40e_down(struct i40e_vsi *vsi)
{
	int i;

	/* It is assumed that the caller of this function
	 * sets the vsi->state __I40E_VSI_DOWN bit.
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}
	i40e_vsi_disable_irq(vsi);
	i40e_vsi_stop_rings(vsi);
	i40e_napi_disable_all(vsi);

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		i40e_clean_tx_ring(vsi->tx_rings[i]);
		if (i40e_enabled_xdp_vsi(vsi))
			i40e_clean_tx_ring(vsi->xdp_rings[i]);
		i40e_clean_rx_ring(vsi->rx_rings[i]);
	}
}

/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 **/
static int i40e_setup_tc(struct net_device *netdev, u8 tc)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc = 0;
	int ret = -EINVAL;
	int i;

	/* Check if DCB enabled to continue */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
		netdev_info(netdev, "DCB is not enabled for adapter\n");
		goto exit;
	}

	/* Check if MFP enabled */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
		goto exit;
	}

	/* Check whether tc count is within enabled limit */
	if (tc > i40e_pf_get_num_tc(pf)) {
		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
		goto exit;
	}

	/* Generate TC map for number of tc requested */
	for (i = 0; i < tc; i++)
		enabled_tc |= BIT(i);

	/* Requesting same TC configuration as already enabled */
	if (enabled_tc == vsi->tc_config.enabled_tc)
		return 0;

	/* Quiesce VSI queues */
	i40e_quiesce_vsi(vsi);

	/* Configure VSI for enabled TCs */
	ret = i40e_vsi_config_tc(vsi, enabled_tc);
	if (ret) {
		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
			    vsi->seid);
		goto exit;
	}

	/* Unquiesce VSI */
	i40e_unquiesce_vsi(vsi);

exit:
	return ret;
}

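/**
 * __i40e_setup_tc - dispatch ndo_setup_tc requests to the right handler
 * @netdev: net device to configure
 * @type: type of TC offload being requested
 * @type_data: offload-specific data (a struct tc_mqprio_qopt here)
 *
 * Only TC_SETUP_MQPRIO is supported; any other offload type is rejected
 * with -EOPNOTSUPP.
 **/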
static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			   void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;

	if (type != TC_SETUP_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return i40e_setup_tc(netdev, mqprio->num_tc);
}

/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	err = i40e_vsi_open(vsi);
	if (err)
		return err;
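
	/* Per the register naming, the TSOMSK_{F,M,L} masks below appear to
	 * tell the hardware which TCP flags to clear when replicating the
	 * header into the first, middle and last segments of a TSO
	 * super-packet: FIN/PSH belong only on the last segment and CWR
	 * only on the first.
	 */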
	/* configure global TSO hardware offload settings */
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN |
						       TCP_FLAG_CWR) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

	udp_tunnel_get_rx_info(netdev);

	return 0;
}

/**
 * i40e_vsi_open -
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Returns 0 on success, negative value on failure
 *
 * Note: expects to be called while under rtnl_lock()
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	char int_name[I40E_INT_NAME_STR_LEN];
	int err;

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	if (vsi->netdev) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

		err = netif_set_real_num_rx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

	} else if (vsi->type == I40E_VSI_FDIR) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));
		err = i40e_vsi_request_irq(vsi, int_name);

	} else {
		err = -EINVAL;
		goto err_setup_rx;
	}

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);

	return err;
}

/**
 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
 * @pf: Pointer to PF
 *
 * This function destroys the hlist where all the Flow Director
 * filters were saved.
 **/
static void i40e_fdir_filter_exit(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	struct i40e_flex_pit *pit_entry, *tmp;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(filter, node2,
				  &pf->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}

	list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
		list_del(&pit_entry->list);
		kfree(pit_entry);
	}
	INIT_LIST_HEAD(&pf->l3_flex_pit_list);

	list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
		list_del(&pit_entry->list);
		kfree(pit_entry);
	}
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);

	pf->fdir_pf_active_filters = 0;
	pf->fd_tcp4_filter_cnt = 0;
	pf->fd_udp4_filter_cnt = 0;
	pf->fd_sctp4_filter_cnt = 0;
	pf->fd_ip4_filter_cnt = 0;
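
	/* The input-set masks written below choose which packet fields
	 * (L3/L4 source and destination) each flow type is matched on,
	 * putting the flow director back to its default configuration.
	 */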
	/* Reprogram the default input set for TCP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for UDP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for SCTP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for Other/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}

/**
 * i40e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * this netdev interface is disabled.
 *
 * Returns 0, this is not allowed to fail
 **/
int i40e_close(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	i40e_vsi_close(vsi);

	return 0;
}

/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
	u32 val;

	WARN_ON(in_interrupt());

	/* do the biggest reset indicated */
	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset
		 *
		 * This will start the chip's countdown to the actual full
		 * chip reset event, and a warning interrupt to be sent
		 * to all PFs, including the requestor. Our handler
		 * for the warning interrupt will deal with the shutdown
		 * and recovery of the switch setup.
		 */
		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset
		 *
		 * Same as Global Reset, except does *not* include the MAC/PHY
		 */
		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {

		/* Request a PF Reset
		 *
		 * Resets only the PF-specific registers
		 *
		 * This goes directly to the tear-down and rebuild of
		 * the switch, since we need to do all the recovery as
		 * for the Core Reset.
		 */
		dev_dbg(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf, lock_acquired);

	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
					       vsi->state))
				i40e_vsi_reinit_locked(pf->vsi[v]);
		}
	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
		int v;

		/* Find the VSI(s) that needs to be brought down */
		dev_info(&pf->pdev->dev, "VSI down requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
					       vsi->state)) {
				set_bit(__I40E_VSI_DOWN, vsi->state);
				i40e_down(vsi);
			}
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
	}
}

#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
 * @pf: board private structure
 * @old_cfg: current DCB config
 * @new_cfg: new DCB config
 **/
bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
			    struct i40e_dcbx_config *old_cfg,
			    struct i40e_dcbx_config *new_cfg)
{
	bool need_reconfig = false;

	/* Check if ETS configuration has changed */
	if (memcmp(&new_cfg->etscfg,
		   &old_cfg->etscfg,
		   sizeof(new_cfg->etscfg))) {
		/* If Priority Table has changed reconfig is needed */
		if (memcmp(&new_cfg->etscfg.prioritytable,
			   &old_cfg->etscfg.prioritytable,
			   sizeof(new_cfg->etscfg.prioritytable))) {
			need_reconfig = true;
			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
		}

		if (memcmp(&new_cfg->etscfg.tcbwtable,
			   &old_cfg->etscfg.tcbwtable,
			   sizeof(new_cfg->etscfg.tcbwtable)))
			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");

		if (memcmp(&new_cfg->etscfg.tsatable,
			   &old_cfg->etscfg.tsatable,
			   sizeof(new_cfg->etscfg.tsatable)))
			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
	}

	/* Check if PFC configuration has changed */
	if (memcmp(&new_cfg->pfc,
		   &old_cfg->pfc,
		   sizeof(new_cfg->pfc))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
	}

	/* Check if APP Table has changed */
	if (memcmp(&new_cfg->app,
		   &old_cfg->app,
		   sizeof(new_cfg->app))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
	}

	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
	return need_reconfig;
}

/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
				  struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lldp_get_mib *mib =
		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_dcbx_config tmp_dcbx_cfg;
	bool need_reconfig = false;
	int ret = 0;
	u8 type;

	/* Not DCB capable or capability disabled */
	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
		return ret;

	/* Ignore if event is not for Nearest Bridge */
	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
		return ret;

	/* Check MIB Type and return if event for Remote MIB update */
	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
	dev_dbg(&pf->pdev->dev,
		"LLDP event mib type %s\n", type ? "remote" : "local");
	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
		/* Update the remote cached instance and return */
		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				&hw->remote_dcbx_config);
		goto exit;
	}

	/* Store the old configuration */
	tmp_dcbx_cfg = hw->local_dcbx_config;

	/* Reset the old DCBx configuration data */
	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
	/* Get updated DCBX data from firmware */
	ret = i40e_get_dcb_config(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto exit;
	}

	/* No change detected in DCBX configs */
	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
		    sizeof(tmp_dcbx_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}

	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
					       &hw->local_dcbx_config);

	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);

	if (!need_reconfig)
		goto exit;

	/* Enable DCB tagging only when more than one TC */
	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
		pf->flags |= I40E_FLAG_DCB_ENABLED;
	else
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;

	set_bit(__I40E_PORT_SUSPENDED, pf->state);
	/* Reconfiguration needed quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Changes in configuration update VEB/VSI */
	i40e_dcb_reconfigure(pf);

	ret = i40e_resume_port_tx(pf);

	clear_bit(__I40E_PORT_SUSPENDED, pf->state);
	/* In case of error no point in resuming VSIs */
	if (ret)
		goto exit;

	/* Wait for the PF's queues to be disabled */
	ret = i40e_pf_wait_queues_disabled(pf);
	if (ret) {
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	} else {
		i40e_pf_unquiesce_all_vsi(pf);
		pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
			      I40E_FLAG_CLIENT_L2_CHANGE);
	}

exit:
	return ret;
}
#endif /* CONFIG_I40E_DCB */

/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags, true);
	rtnl_unlock();
}

/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VFs
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			      >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}

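/* The helpers below read the flow director usage counters. The hardware
 * tracks programmed filters in two pools, reported as GUARANT_CNT (the
 * PF's guaranteed allocation) and BEST_CNT (best-effort space), per-PF
 * in I40E_PFQF_FDSTAT and device-wide in I40E_GLQF_FDCNT_0.
 */
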
/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/
u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
	return fcnt_prog;
}

/**
 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
 * @pf: board private structure
 **/
u32 i40e_get_current_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_get_global_fd_count - Get total FD filters programmed on device
 * @pf: board private structure
 **/
u32 i40e_get_global_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	u32 fcnt_prog, fcnt_avail;
	struct hlist_node *node;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		return;

	/* Check if we have enough room to re-enable FDir SB capability. */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
		if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
			pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}

	/* We should wait for even more space before re-enabling ATR.
	 * Additionally, we cannot enable ATR as long as we still have TCP SB
	 * rules active.
	 */
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
	    (pf->fd_tcp4_filter_cnt == 0)) {
		if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
		}
	}

	/* if hw had a problem adding a filter, delete it */
	if (pf->fd_inv > 0) {
		hlist_for_each_entry_safe(filter, node,
					  &pf->fdir_filter_list, fdir_node) {
			if (filter->fd_id == pf->fd_inv) {
				hlist_del(&filter->fdir_node);
				kfree(filter);
				pf->fdir_pf_active_filters--;
			}
		}
	}
}

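/* Minimum seconds between flushes of the FD table, and the longer
 * hold-off before ATR is re-enabled when a flush fires while the table
 * is dominated by sideband rules; both values are scaled by HZ where
 * used below.
 */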
#define I40E_MIN_FD_FLUSH_INTERVAL 10
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30

/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	u32 reg;

	if (!time_after(jiffies, pf->fd_flush_timestamp +
				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;

	/* If the flush is happening too quick and we have mostly SB rules we
	 * should not re-enable ATR for some time.
	 */
	min_flush_time = pf->fd_flush_timestamp +
			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

	if (!(time_after(jiffies, min_flush_time)) &&
	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
		disable_atr = true;
	}

	pf->fd_flush_timestamp = jiffies;
	pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
	/* flush all filters */
	wr32(&pf->hw, I40E_PFQF_CTL_1,
	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	i40e_flush(&pf->hw);
	pf->fd_flush_cnt++;
	pf->fd_add_err = 0;
	do {
		/* Check FD flush status every 5-6msec */
		usleep_range(5000, 6000);
		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	} while (flush_wait_retry--);
	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
	} else {
		/* replay sideband filters */
		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
		if (!disable_atr && !pf->fd_tcp4_filter_cnt)
			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
		clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}
}

/**
 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 **/
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}

/* We can see up to 256 filter programming descriptors in transit if the
 * filters are being applied really fast, before we see the first filter
 * miss error on Rx queue 0. Accumulating enough error messages before
 * reacting makes sure we don't cause a flush too often.
 */
#define I40E_MAX_FD_PROGRAM_ERROR 256

/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 **/
static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, pf->state))
		return;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		i40e_fdir_flush_and_replay(pf);

	i40e_fdir_check_and_reenable(pf);
}

/**
 * i40e_vsi_link_event - notify VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
	if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		if (!vsi->netdev || !vsi->netdev_registered)
			break;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
		break;

	case I40E_VSI_SRIOV:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_CTRL:
	case I40E_VSI_IWARP:
	case I40E_VSI_MIRROR:
	default:
		/* there is no notification for other VSIs */
		break;
	}
}

6370 * i40e_veb_link_event - notify elements on the veb of a link event
6371 * @veb: veb to be notified
6372 * @link_up: link up or down
6374 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
6379 if (!veb || !veb->pf)
6383 /* depth first... */
6384 for (i = 0; i < I40E_MAX_VEB; i++)
6385 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
6386 i40e_veb_link_event(pf->veb[i], link_up);
6388 /* ... now the local VSIs */
6389 for (i = 0; i < pf->num_alloc_vsi; i++)
6390 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
6391 i40e_vsi_link_event(pf->vsi[i], link_up);
6395 * i40e_link_event - Update netif_carrier status
6396 * @pf: board private structure
6398 static void i40e_link_event(struct i40e_pf *pf)
6400 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6401 u8 new_link_speed, old_link_speed;
6403 bool new_link, old_link;
6405 /* save off old link status information */
6406 pf->hw.phy.link_info_old = pf->hw.phy.link_info;
6408 /* set this to force the get_link_status call to refresh state */
6409 pf->hw.phy.get_link_info = true;
6411 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
6413 status = i40e_get_link_status(&pf->hw, &new_link);
6415 /* On success, disable temp link polling */
6416 if (status == I40E_SUCCESS) {
6417 if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
6418 pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
6420 /* Enable link polling temporarily until i40e_get_link_status
6421 * returns I40E_SUCCESS
6423 pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
6424 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
6429 old_link_speed = pf->hw.phy.link_info_old.link_speed;
6430 new_link_speed = pf->hw.phy.link_info.link_speed;
6432 if (new_link == old_link &&
6433 new_link_speed == old_link_speed &&
6434 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
6435 new_link == netif_carrier_ok(vsi->netdev)))
6438 i40e_print_link_message(vsi, new_link);
6440 /* Notify the base of the switch tree connected to
6441 * the link. Floating VEBs are not notified.
6443 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
6444 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
6446 i40e_vsi_link_event(vsi, new_link);
6449 i40e_vc_notify_link_state(pf);
6451 if (pf->flags & I40E_FLAG_PTP)
6452 i40e_ptp_set_increment(pf);
6456 * i40e_watchdog_subtask - periodic checks not using event driven response
6457 * @pf: board private structure
6459 static void i40e_watchdog_subtask(struct i40e_pf *pf)
6463 /* if interface is down do nothing */
6464 if (test_bit(__I40E_DOWN, pf->state) ||
6465 test_bit(__I40E_CONFIG_BUSY, pf->state))
6468 /* make sure we don't do these things too often */
6469 if (time_before(jiffies, (pf->service_timer_previous +
6470 pf->service_timer_period)))
6472 pf->service_timer_previous = jiffies;
6474 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
6475 (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
6476 i40e_link_event(pf);
6478 /* Update the stats for active netdevs so the network stack
6479 * can look at updated numbers whenever it cares to
6481 for (i = 0; i < pf->num_alloc_vsi; i++)
6482 if (pf->vsi[i] && pf->vsi[i]->netdev)
6483 i40e_update_stats(pf->vsi[i]);
6485 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
6486 /* Update the stats for the active switching components */
6487 for (i = 0; i < I40E_MAX_VEB; i++)
6489 i40e_update_veb_stats(pf->veb[i]);
6492 i40e_ptp_rx_hang(pf);
6493 i40e_ptp_tx_hang(pf);
6497 * i40e_reset_subtask - Set up for resetting the device and driver
6498 * @pf: board private structure
6500 static void i40e_reset_subtask(struct i40e_pf *pf)
6502 u32 reset_flags = 0;
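	/* Gather every requested reset type into one flags word first; the
	 * single i40e_do_reset() call below then acts on whichever reset
	 * types were requested since the last pass.
	 */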
6504 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
6505 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
6506 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
6508 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
6509 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
6510 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6512 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
6513 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
6514 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
6516 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
6517 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
6518 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
6520 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
6521 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
6522 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
	/* If there's a recovery already waiting, it takes
	 * precedence over starting a new reset sequence.
6528 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
6529 i40e_prep_for_reset(pf, false);
6531 i40e_rebuild(pf, false, false);
	/* If we're already down or resetting, just bail */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
		i40e_do_reset(pf, reset_flags, false);
	}
6543 * i40e_handle_link_event - Handle link event
6544 * @pf: board private structure
6545 * @e: event info posted on ARQ
6547 static void i40e_handle_link_event(struct i40e_pf *pf,
6548 struct i40e_arq_event_info *e)
6550 struct i40e_aqc_get_link_status *status =
6551 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
6553 /* Do a new status request to re-enable LSE reporting
6554 * and load new status information into the hw struct
6555 * This completely ignores any state information
6556 * in the ARQ event info, instead choosing to always
6557 * issue the AQ update link status command.
6559 i40e_link_event(pf);
6561 /* Check if module meets thermal requirements */
6562 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
6563 dev_err(&pf->pdev->dev,
6564 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
6565 dev_err(&pf->pdev->dev,
6566 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
6568 /* check for unqualified module, if link is down, suppress
6569 * the message if link was forced to be down.
6571 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
6572 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
6573 (!(status->link_info & I40E_AQ_LINK_UP)) &&
6574 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
6575 dev_err(&pf->pdev->dev,
6576 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
6577 dev_err(&pf->pdev->dev,
6578 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
6584 * i40e_clean_adminq_subtask - Clean the AdminQ rings
6585 * @pf: board private structure
6587 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
6589 struct i40e_arq_event_info event;
6590 struct i40e_hw *hw = &pf->hw;
6597 /* Do not run clean AQ when PF reset fails */
6598 if (test_bit(__I40E_RESET_FAILED, pf->state))
6601 /* check for error indications */
6602 val = rd32(&pf->hw, pf->hw.aq.arq.len);
6604 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
6605 if (hw->debug_mask & I40E_DEBUG_AQ)
6606 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
6607 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
6609 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
6610 if (hw->debug_mask & I40E_DEBUG_AQ)
6611 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
6612 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
6613 pf->arq_overflows++;
6615 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
6616 if (hw->debug_mask & I40E_DEBUG_AQ)
6617 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
6618 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
6621 wr32(&pf->hw, pf->hw.aq.arq.len, val);
6623 val = rd32(&pf->hw, pf->hw.aq.asq.len);
6625 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
6626 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6627 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
6628 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
6630 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
6631 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6632 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
6633 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
6635 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
6636 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6637 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
6638 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
6641 wr32(&pf->hw, pf->hw.aq.asq.len, val);
6643 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
6644 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
6649 ret = i40e_clean_arq_element(hw, &event, &pending);
6650 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
6653 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
6657 opcode = le16_to_cpu(event.desc.opcode);
6660 case i40e_aqc_opc_get_link_status:
6661 i40e_handle_link_event(pf, &event);
6663 case i40e_aqc_opc_send_msg_to_pf:
6664 ret = i40e_vc_process_vf_msg(pf,
6665 le16_to_cpu(event.desc.retval),
6666 le32_to_cpu(event.desc.cookie_high),
6667 le32_to_cpu(event.desc.cookie_low),
6671 case i40e_aqc_opc_lldp_update_mib:
6672 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
6673 #ifdef CONFIG_I40E_DCB
6675 ret = i40e_handle_lldp_event(pf, &event);
6677 #endif /* CONFIG_I40E_DCB */
6679 case i40e_aqc_opc_event_lan_overflow:
6680 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
6681 i40e_handle_lan_overflow_event(pf, &event);
6683 case i40e_aqc_opc_send_msg_to_peer:
6684 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
6686 case i40e_aqc_opc_nvm_erase:
6687 case i40e_aqc_opc_nvm_update:
6688 case i40e_aqc_opc_oem_post_update:
6689 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
6690 "ARQ NVM operation 0x%04x completed\n",
6694 dev_info(&pf->pdev->dev,
6695 "ARQ: Unknown event 0x%04x ignored\n",
6699 } while (i++ < pf->adminq_work_limit);
6701 if (i < pf->adminq_work_limit)
6702 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
6704 /* re-enable Admin queue interrupt cause */
6705 val = rd32(hw, I40E_PFINT_ICR0_ENA);
6706 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
6707 wr32(hw, I40E_PFINT_ICR0_ENA, val);
6710 kfree(event.msg_buf);
6714 * i40e_verify_eeprom - make sure eeprom is good to use
6715 * @pf: board private structure
6717 static void i40e_verify_eeprom(struct i40e_pf *pf)
6721 err = i40e_diag_eeprom_test(&pf->hw);
6723 /* retry in case of garbage read */
6724 err = i40e_diag_eeprom_test(&pf->hw);
6726 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6728 set_bit(__I40E_BAD_EEPROM, pf->state);
6732 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
6733 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6734 clear_bit(__I40E_BAD_EEPROM, pf->state);
6739 * i40e_enable_pf_switch_lb
6740 * @pf: pointer to the PF structure
6742 * enable switch loop back or die - no point in a return value
6744 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6746 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6747 struct i40e_vsi_context ctxt;
6750 ctxt.seid = pf->main_vsi_seid;
6751 ctxt.pf_num = pf->hw.pf_id;
6753 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6755 dev_info(&pf->pdev->dev,
6756 "couldn't get PF vsi config, err %s aq_err %s\n",
6757 i40e_stat_str(&pf->hw, ret),
6758 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6761 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6762 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6763 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6765 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6767 dev_info(&pf->pdev->dev,
6768 "update vsi switch failed, err %s aq_err %s\n",
6769 i40e_stat_str(&pf->hw, ret),
6770 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6775 * i40e_disable_pf_switch_lb
6776 * @pf: pointer to the PF structure
6778 * disable switch loop back or die - no point in a return value
6780 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6782 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6783 struct i40e_vsi_context ctxt;
6786 ctxt.seid = pf->main_vsi_seid;
6787 ctxt.pf_num = pf->hw.pf_id;
6789 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6791 dev_info(&pf->pdev->dev,
6792 "couldn't get PF vsi config, err %s aq_err %s\n",
6793 i40e_stat_str(&pf->hw, ret),
6794 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6797 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6798 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6799 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6801 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6803 dev_info(&pf->pdev->dev,
6804 "update vsi switch failed, err %s aq_err %s\n",
6805 i40e_stat_str(&pf->hw, ret),
6806 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6811 * i40e_config_bridge_mode - Configure the HW bridge mode
6812 * @veb: pointer to the bridge instance
6814 * Configure the loop back mode for the LAN VSI that is downlink to the
6815 * specified HW bridge instance. It is expected this function is called
6816 * when a new HW bridge is instantiated.
6818 static void i40e_config_bridge_mode(struct i40e_veb *veb)
6820 struct i40e_pf *pf = veb->pf;
6822 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
6823 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6824 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
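	/* In VEPA mode, loopback is disabled so that VM-to-VM traffic must
	 * hairpin through the adjacent external switch; VEB mode keeps
	 * loopback enabled so the HW bridge can forward locally.
	 */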
6825 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6826 i40e_disable_pf_switch_lb(pf);
6828 i40e_enable_pf_switch_lb(pf);
6832 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
6833 * @veb: pointer to the VEB instance
 * This is a recursive function that first builds the attached VSIs and then
 * recurses to build the next layer of VEBs. We track the connections
 * through our own index numbers because the SEIDs from the HW could
 * change across the reset.
6840 static int i40e_reconstitute_veb(struct i40e_veb *veb)
6842 struct i40e_vsi *ctl_vsi = NULL;
6843 struct i40e_pf *pf = veb->pf;
6847 /* build VSI that owns this VEB, temporarily attached to base VEB */
6848 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
6850 pf->vsi[v]->veb_idx == veb->idx &&
6851 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
6852 ctl_vsi = pf->vsi[v];
6857 dev_info(&pf->pdev->dev,
6858 "missing owner VSI for veb_idx %d\n", veb->idx);
6860 goto end_reconstitute;
6862 if (ctl_vsi != pf->vsi[pf->lan_vsi])
6863 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6864 ret = i40e_add_vsi(ctl_vsi);
6866 dev_info(&pf->pdev->dev,
6867 "rebuild of veb_idx %d owner VSI failed: %d\n",
6869 goto end_reconstitute;
6871 i40e_vsi_reset_stats(ctl_vsi);
6873 /* create the VEB in the switch and move the VSI onto the VEB */
6874 ret = i40e_add_veb(veb, ctl_vsi);
6876 goto end_reconstitute;
6878 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6879 veb->bridge_mode = BRIDGE_MODE_VEB;
6881 veb->bridge_mode = BRIDGE_MODE_VEPA;
6882 i40e_config_bridge_mode(veb);
6884 /* create the remaining VSIs attached to this VEB */
6885 for (v = 0; v < pf->num_alloc_vsi; v++) {
6886 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
6889 if (pf->vsi[v]->veb_idx == veb->idx) {
6890 struct i40e_vsi *vsi = pf->vsi[v];
6892 vsi->uplink_seid = veb->seid;
6893 ret = i40e_add_vsi(vsi);
6895 dev_info(&pf->pdev->dev,
6896 "rebuild of vsi_idx %d failed: %d\n",
6898 goto end_reconstitute;
6900 i40e_vsi_reset_stats(vsi);
6904 /* create any VEBs attached to this VEB - RECURSION */
6905 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6906 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
6907 pf->veb[veb_idx]->uplink_seid = veb->seid;
6908 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
6919 * i40e_get_capabilities - get info about the HW
6920 * @pf: the PF struct
6922 static int i40e_get_capabilities(struct i40e_pf *pf)
6924 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
6929 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
6931 cap_buf = kzalloc(buf_len, GFP_KERNEL);
6935 /* this loads the data into the hw struct for us */
	err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
					    &data_size,
					    i40e_aqc_opc_list_func_capabilities,
					    NULL);
	/* data loaded, buffer no longer needed */
6943 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
6944 /* retry with a larger buffer */
6945 buf_len = data_size;
6946 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
6947 dev_info(&pf->pdev->dev,
6948 "capability discovery failed, err %s aq_err %s\n",
6949 i40e_stat_str(&pf->hw, err),
6950 i40e_aq_str(&pf->hw,
6951 pf->hw.aq.asq_last_status));
6956 if (pf->hw.debug_mask & I40E_DEBUG_USER)
6957 dev_info(&pf->pdev->dev,
6958 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
6959 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
6960 pf->hw.func_caps.num_msix_vectors,
6961 pf->hw.func_caps.num_msix_vectors_vf,
6962 pf->hw.func_caps.fd_filters_guaranteed,
6963 pf->hw.func_caps.fd_filters_best_effort,
6964 pf->hw.func_caps.num_tx_qp,
6965 pf->hw.func_caps.num_vsis);
6967 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
6968 + pf->hw.func_caps.num_vfs)
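	/* Illustrative arithmetic, assuming no FCoE and 32 VFs reported:
	 * DEF_NUM_VSI = 1 + 0 + 32 = 33, so a revision 0 part claiming
	 * fewer than 33 VSIs is bumped up below.
	 */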
6969 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
6970 dev_info(&pf->pdev->dev,
6971 "got num_vsis %d, setting num_vsis to %d\n",
6972 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
6973 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
6979 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6982 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6983 * @pf: board private structure
6985 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6987 struct i40e_vsi *vsi;
6989 /* quick workaround for an NVM issue that leaves a critical register
6992 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6993 static const u32 hkey[] = {
6994 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6995 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6996 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
7000 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
7001 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
7004 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
7007 /* find existing VSI and see if it needs configuring */
7008 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
7010 /* create a new VSI if none exists */
7012 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
7013 pf->vsi[pf->lan_vsi]->seid, 0);
7015 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
7016 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7021 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
7025 * i40e_fdir_teardown - release the Flow Director resources
7026 * @pf: board private structure
7028 static void i40e_fdir_teardown(struct i40e_pf *pf)
7030 struct i40e_vsi *vsi;
7032 i40e_fdir_filter_exit(pf);
7033 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
7035 i40e_vsi_release(vsi);
7039 * i40e_prep_for_reset - prep for the core to reset
7040 * @pf: board private structure
7041 * @lock_acquired: indicates whether or not the lock has been acquired
7042 * before this function was called.
7044 * Close up the VFs and other things in prep for PF Reset.
7046 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
7048 struct i40e_hw *hw = &pf->hw;
7049 i40e_status ret = 0;
7052 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
7053 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
7055 if (i40e_check_asq_alive(&pf->hw))
7056 i40e_vc_notify_reset(pf);
7058 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
7060 /* quiesce the VSIs and their queues that are not already DOWN */
	/* pf_quiesce_all_vsi modifies netdev structures - rtnl_lock needed */
7064 i40e_pf_quiesce_all_vsi(pf);
7068 for (v = 0; v < pf->num_alloc_vsi; v++) {
7070 pf->vsi[v]->seid = 0;
7073 i40e_shutdown_adminq(&pf->hw);
7075 /* call shutdown HMC */
7076 if (hw->hmc.hmc_obj) {
7077 ret = i40e_shutdown_lan_hmc(hw);
7079 dev_warn(&pf->pdev->dev,
7080 "shutdown_lan_hmc failed: %d\n", ret);
7085 * i40e_send_version - update firmware with driver version
7088 static void i40e_send_version(struct i40e_pf *pf)
7090 struct i40e_driver_version dv;
7092 dv.major_version = DRV_VERSION_MAJOR;
7093 dv.minor_version = DRV_VERSION_MINOR;
7094 dv.build_version = DRV_VERSION_BUILD;
7095 dv.subbuild_version = 0;
7096 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
7097 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
7101 * i40e_get_oem_version - get OEM specific version information
7102 * @hw: pointer to the hardware structure
7104 static void i40e_get_oem_version(struct i40e_hw *hw)
7106 u16 block_offset = 0xffff;
7107 u16 block_length = 0;
7108 u16 capabilities = 0;
7112 #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
7113 #define I40E_NVM_OEM_LENGTH_OFFSET 0x00
7114 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
7115 #define I40E_NVM_OEM_GEN_OFFSET 0x02
7116 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03
7117 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
7118 #define I40E_NVM_OEM_LENGTH 3
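	/* Per the offsets above, the OEM block referenced from the shadow
	 * RAM pointer is a length word (offset 0x00) followed by three data
	 * words: capabilities (0x01), gen/snap (0x02) and release (0x03);
	 * the latter two are packed into hw->nvm.oem_ver below.
	 */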
7120 /* Check if pointer to OEM version block is valid. */
7121 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
7122 if (block_offset == 0xffff)
7125 /* Check if OEM version block has correct length. */
7126 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
7128 if (block_length < I40E_NVM_OEM_LENGTH)
7131 /* Check if OEM version format is as expected. */
7132 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
7134 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
7137 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
7139 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
7141 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
7142 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
 * i40e_reset - wait for a core reset to finish; reset the PF if CORER was not seen
7147 * @pf: board private structure
7149 static int i40e_reset(struct i40e_pf *pf)
7151 struct i40e_hw *hw = &pf->hw;
7154 ret = i40e_pf_reset(hw);
7156 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
7157 set_bit(__I40E_RESET_FAILED, pf->state);
7158 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
7166 * i40e_rebuild - rebuild using a saved config
7167 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
7169 * @lock_acquired: indicates whether or not the lock has been acquired
7170 * before this function was called.
7172 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
7174 struct i40e_hw *hw = &pf->hw;
7175 u8 set_fc_aq_fail = 0;
7180 if (test_bit(__I40E_DOWN, pf->state))
7181 goto clear_recovery;
7182 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
7184 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
7185 ret = i40e_init_adminq(&pf->hw);
7187 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
7188 i40e_stat_str(&pf->hw, ret),
7189 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7190 goto clear_recovery;
7192 i40e_get_oem_version(&pf->hw);
7194 /* re-verify the eeprom if we just had an EMP reset */
7195 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
7196 i40e_verify_eeprom(pf);
7198 i40e_clear_pxe_mode(hw);
7199 ret = i40e_get_capabilities(pf);
7201 goto end_core_reset;
7203 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
7204 hw->func_caps.num_rx_qp, 0, 0);
7206 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
7207 goto end_core_reset;
7209 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
7211 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
7212 goto end_core_reset;
7215 #ifdef CONFIG_I40E_DCB
7216 ret = i40e_init_pf_dcb(pf);
7218 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
7219 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
7220 /* Continue without DCB enabled */
7222 #endif /* CONFIG_I40E_DCB */
7223 /* do basic switch setup */
7226 ret = i40e_setup_pf_switch(pf, reinit);
7230 /* The driver only wants link up/down and module qualification
7231 * reports from firmware. Note the negative logic.
7233 ret = i40e_aq_set_phy_int_mask(&pf->hw,
7234 ~(I40E_AQ_EVENT_LINK_UPDOWN |
7235 I40E_AQ_EVENT_MEDIA_NA |
7236 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
7238 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
7239 i40e_stat_str(&pf->hw, ret),
7240 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7242 /* make sure our flow control settings are restored */
7243 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
7245 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
7246 i40e_stat_str(&pf->hw, ret),
7247 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7249 /* Rebuild the VSIs and VEBs that existed before reset.
7250 * They are still in our local switch element arrays, so only
7251 * need to rebuild the switch model in the HW.
 * If there were VEBs but the reconstitution failed, we'll try
 * to recover minimal use by getting the basic PF VSI working.
7256 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
7257 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
7258 /* find the one VEB connected to the MAC, and find orphans */
7259 for (v = 0; v < I40E_MAX_VEB; v++) {
7263 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
7264 pf->veb[v]->uplink_seid == 0) {
7265 ret = i40e_reconstitute_veb(pf->veb[v]);
7270 /* If Main VEB failed, we're in deep doodoo,
7271 * so give up rebuilding the switch and set up
7272 * for minimal rebuild of PF VSI.
7273 * If orphan failed, we'll report the error
7274 * but try to keep going.
7276 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
7277 dev_info(&pf->pdev->dev,
7278 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
7280 pf->vsi[pf->lan_vsi]->uplink_seid
7283 } else if (pf->veb[v]->uplink_seid == 0) {
7284 dev_info(&pf->pdev->dev,
7285 "rebuild of orphan VEB failed: %d\n",
7292 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
7293 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
7294 /* no VEB, so rebuild only the Main VSI */
7295 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
7297 dev_info(&pf->pdev->dev,
7298 "rebuild of Main VSI failed: %d\n", ret);
7303 /* Reconfigure hardware for allowing smaller MSS in the case
7304 * of TSO, so that we avoid the MDD being fired and causing
7305 * a reset in the case of small MSS+TSO.
7307 #define I40E_REG_MSS 0x000E64DC
7308 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
7309 #define I40E_64BYTE_MSS 0x400000
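	/* The minimum-MSS field occupies bits 25:16 of I40E_REG_MSS, so
	 * I40E_64BYTE_MSS == (64 << 16) sets a 64 byte floor. Worked
	 * example: a larger default such as 256 in that field (0x1000000)
	 * is rewritten to 64 by the code below.
	 */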
7310 val = rd32(hw, I40E_REG_MSS);
7311 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
7312 val &= ~I40E_REG_MSS_MIN_MASK;
7313 val |= I40E_64BYTE_MSS;
7314 wr32(hw, I40E_REG_MSS, val);
7317 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
7319 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
7321 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
7322 i40e_stat_str(&pf->hw, ret),
7323 i40e_aq_str(&pf->hw,
7324 pf->hw.aq.asq_last_status));
7326 /* reinit the misc interrupt */
7327 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7328 ret = i40e_setup_misc_vector(pf);
	/* Add a filter to drop all Flow control frames from any VSI, so they
	 * cannot be transmitted. By doing so we stop a malicious VF from
	 * sending out PAUSE or PFC frames and potentially controlling traffic
	 * for other VF/VM sessions.
	 * The FW can still send Flow control frames if enabled.
	 */
7336 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
7339 /* restart the VSIs that were rebuilt and running before the reset */
7340 i40e_pf_unquiesce_all_vsi(pf);
7342 /* Release the RTNL lock before we start resetting VFs */
7346 i40e_reset_all_vfs(pf, true);
7348 /* tell the firmware that we're starting */
7349 i40e_send_version(pf);
7351 /* We've already released the lock, so don't do it again */
7352 goto end_core_reset;
7358 clear_bit(__I40E_RESET_FAILED, pf->state);
7360 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
7364 * i40e_reset_and_rebuild - reset and rebuild using a saved config
7365 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
7367 * @lock_acquired: indicates whether or not the lock has been acquired
7368 * before this function was called.
7370 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
7374 /* Now we wait for GRST to settle out.
7375 * We don't have to delete the VEBs or VSIs from the hw switch
7376 * because the reset will make them disappear.
7378 ret = i40e_reset(pf);
7380 i40e_rebuild(pf, reinit, lock_acquired);
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
7392 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
7394 i40e_prep_for_reset(pf, lock_acquired);
7395 i40e_reset_and_rebuild(pf, false, lock_acquired);
7399 * i40e_handle_mdd_event
7400 * @pf: pointer to the PF structure
 * Called from the MDD irq handler to identify possibly malicious VFs
7404 static void i40e_handle_mdd_event(struct i40e_pf *pf)
7406 struct i40e_hw *hw = &pf->hw;
7407 bool mdd_detected = false;
7408 bool pf_mdd_detected = false;
7413 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
7416 /* find what triggered the MDD event */
7417 reg = rd32(hw, I40E_GL_MDET_TX);
7418 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
7419 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
7420 I40E_GL_MDET_TX_PF_NUM_SHIFT;
7421 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
7422 I40E_GL_MDET_TX_VF_NUM_SHIFT;
7423 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
7424 I40E_GL_MDET_TX_EVENT_SHIFT;
7425 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
7426 I40E_GL_MDET_TX_QUEUE_SHIFT) -
7427 pf->hw.func_caps.base_queue;
7428 if (netif_msg_tx_err(pf))
7429 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
7430 event, queue, pf_num, vf_num);
7431 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
7432 mdd_detected = true;
7434 reg = rd32(hw, I40E_GL_MDET_RX);
7435 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
7436 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
7437 I40E_GL_MDET_RX_FUNCTION_SHIFT;
7438 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
7439 I40E_GL_MDET_RX_EVENT_SHIFT;
7440 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
7441 I40E_GL_MDET_RX_QUEUE_SHIFT) -
7442 pf->hw.func_caps.base_queue;
7443 if (netif_msg_rx_err(pf))
7444 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
7445 event, queue, func);
7446 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
7447 mdd_detected = true;
7451 reg = rd32(hw, I40E_PF_MDET_TX);
7452 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
7453 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
7454 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
7455 pf_mdd_detected = true;
7457 reg = rd32(hw, I40E_PF_MDET_RX);
7458 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
7459 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
7460 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
7461 pf_mdd_detected = true;
7463 /* Queue belongs to the PF, initiate a reset */
7464 if (pf_mdd_detected) {
7465 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
7466 i40e_service_event_schedule(pf);
7470 /* see if one of the VFs needs its hand slapped */
7471 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
7473 reg = rd32(hw, I40E_VP_MDET_TX(i));
7474 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
7475 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
7476 vf->num_mdd_events++;
7477 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
7481 reg = rd32(hw, I40E_VP_MDET_RX(i));
7482 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
7483 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
7484 vf->num_mdd_events++;
7485 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
7489 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
7490 dev_info(&pf->pdev->dev,
7491 "Too many MDD events on VF %d, disabled\n", i);
7492 dev_info(&pf->pdev->dev,
7493 "Use PF Control I/F to re-enable the VF\n");
7494 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
7498 /* re-enable mdd interrupt cause */
7499 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
7500 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
7501 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
7502 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
7506 static const char *i40e_tunnel_name(struct i40e_udp_port_config *port)
	switch (port->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		return "vxlan";
	case UDP_TUNNEL_TYPE_GENEVE:
		return "geneve";
	}
7519 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
7520 * @pf: board private structure
7522 static void i40e_sync_udp_filters(struct i40e_pf *pf)
7526 /* loop through and set pending bit for all active UDP filters */
7527 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7528 if (pf->udp_ports[i].port)
7529 pf->pending_udp_bitmap |= BIT_ULL(i);
7532 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
7536 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
7537 * @pf: board private structure
7539 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
7541 struct i40e_hw *hw = &pf->hw;
7546 if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
7549 pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
7551 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7552 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
7553 pf->pending_udp_bitmap &= ~BIT_ULL(i);
7554 port = pf->udp_ports[i].port;
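			/* A port of zero means the slot was vacated, so the
			 * tunnel is deleted from HW by index; otherwise the
			 * stored port is (re)added with its tunnel type.
			 */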
7556 ret = i40e_aq_add_udp_tunnel(hw, port,
7557 pf->udp_ports[i].type,
7560 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
7563 dev_info(&pf->pdev->dev,
7564 "%s %s port %d, index %d failed, err %s aq_err %s\n",
7565 i40e_tunnel_name(&pf->udp_ports[i]),
7566 port ? "add" : "delete",
7568 i40e_stat_str(&pf->hw, ret),
7569 i40e_aq_str(&pf->hw,
7570 pf->hw.aq.asq_last_status));
7571 pf->udp_ports[i].port = 0;
7578 * i40e_service_task - Run the driver's async subtasks
7579 * @work: pointer to work_struct containing our data
7581 static void i40e_service_task(struct work_struct *work)
7583 struct i40e_pf *pf = container_of(work,
7586 unsigned long start_time = jiffies;
7588 /* don't bother with service tasks if a reset is in progress */
7589 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
7592 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
7595 i40e_detect_recover_hung(pf);
7596 i40e_sync_filters_subtask(pf);
7597 i40e_reset_subtask(pf);
7598 i40e_handle_mdd_event(pf);
7599 i40e_vc_process_vflr_event(pf);
7600 i40e_watchdog_subtask(pf);
7601 i40e_fdir_reinit_subtask(pf);
7602 if (pf->flags & I40E_FLAG_CLIENT_RESET) {
7603 /* Client subtask will reopen next time through. */
7604 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
7605 pf->flags &= ~I40E_FLAG_CLIENT_RESET;
7607 i40e_client_subtask(pf);
7608 if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
7609 i40e_notify_client_of_l2_param_changes(
7610 pf->vsi[pf->lan_vsi]);
7611 pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
7614 i40e_sync_filters_subtask(pf);
7615 i40e_sync_udp_filters_subtask(pf);
7616 i40e_clean_adminq_subtask(pf);
7618 /* flush memory to make sure state is correct before next watchdog */
7619 smp_mb__before_atomic();
7620 clear_bit(__I40E_SERVICE_SCHED, pf->state);
7622 /* If the tasks have taken longer than one timer cycle or there
7623 * is more work to be done, reschedule the service task now
7624 * rather than wait for the timer to tick again.
7626 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
7627 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
7628 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
7629 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
7630 i40e_service_event_schedule(pf);
7634 * i40e_service_timer - timer callback
7635 * @data: pointer to PF struct
7637 static void i40e_service_timer(unsigned long data)
7639 struct i40e_pf *pf = (struct i40e_pf *)data;
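	/* Re-arm before scheduling work so a slow service task cannot stall
	 * the watchdog; round_jiffies() aligns expiry to a whole jiffy to
	 * batch timer wakeups across the system.
	 */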
7641 mod_timer(&pf->service_timer,
7642 round_jiffies(jiffies + pf->service_timer_period));
7643 i40e_service_event_schedule(pf);
7647 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
7648 * @vsi: the VSI being configured
7650 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
7652 struct i40e_pf *pf = vsi->back;
7654 switch (vsi->type) {
7656 vsi->alloc_queue_pairs = pf->num_lan_qps;
7657 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7658 I40E_REQ_DESCRIPTOR_MULTIPLE);
7659 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7660 vsi->num_q_vectors = pf->num_lan_msix;
7662 vsi->num_q_vectors = 1;
7667 vsi->alloc_queue_pairs = 1;
7668 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
7669 I40E_REQ_DESCRIPTOR_MULTIPLE);
7670 vsi->num_q_vectors = pf->num_fdsb_msix;
7673 case I40E_VSI_VMDQ2:
7674 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
7675 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7676 I40E_REQ_DESCRIPTOR_MULTIPLE);
7677 vsi->num_q_vectors = pf->num_vmdq_msix;
7680 case I40E_VSI_SRIOV:
7681 vsi->alloc_queue_pairs = pf->num_vf_qps;
7682 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7683 I40E_REQ_DESCRIPTOR_MULTIPLE);
7695 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: pointer to the VSI being configured
7697 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
7699 * On error: returns error code (negative)
7700 * On success: returns 0
7702 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
7704 struct i40e_ring **next_rings;
7708 /* allocate memory for both Tx, XDP Tx and Rx ring pointers */
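	/* The single allocation below is carved into sections of
	 * alloc_queue_pairs pointers each:
	 *
	 *   [ tx_rings | xdp_rings (only when XDP is enabled) | rx_rings ]
	 */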
7709 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
7710 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
7711 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
7714 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
7715 if (i40e_enabled_xdp_vsi(vsi)) {
7716 vsi->xdp_rings = next_rings;
7717 next_rings += vsi->alloc_queue_pairs;
7719 vsi->rx_rings = next_rings;
7721 if (alloc_qvectors) {
7722 /* allocate memory for q_vector pointers */
7723 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
7724 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
7725 if (!vsi->q_vectors) {
7733 kfree(vsi->tx_rings);
7738 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
7739 * @pf: board private structure
7740 * @type: type of VSI
7742 * On error: returns error code (negative)
7743 * On success: returns vsi index in PF (positive)
7745 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
7748 struct i40e_vsi *vsi;
7752 /* Need to protect the allocation of the VSIs at the PF level */
7753 mutex_lock(&pf->switch_mutex);
7755 /* VSI list may be fragmented if VSI creation/destruction has
7756 * been happening. We can afford to do a quick scan to look
7757 * for any free VSIs in the list.
7759 * find next empty vsi slot, looping back around if necessary
7762 while (i < pf->num_alloc_vsi && pf->vsi[i])
7764 if (i >= pf->num_alloc_vsi) {
7766 while (i < pf->next_vsi && pf->vsi[i])
7770 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
7771 vsi_idx = i; /* Found one! */
7774 goto unlock_pf; /* out of VSI slots! */
7778 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
7785 set_bit(__I40E_VSI_DOWN, vsi->state);
7788 vsi->int_rate_limit = 0;
7789 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
7790 pf->rss_table_size : 64;
7791 vsi->netdev_registered = false;
7792 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
7793 hash_init(vsi->mac_filter_hash);
7794 vsi->irqs_ready = false;
7796 ret = i40e_set_num_rings_in_vsi(vsi);
7800 ret = i40e_vsi_alloc_arrays(vsi, true);
7804 /* Setup default MSIX irq handler for VSI */
7805 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
7807 /* Initialize VSI lock */
7808 spin_lock_init(&vsi->mac_filter_hash_lock);
7809 pf->vsi[vsi_idx] = vsi;
7814 pf->next_vsi = i - 1;
7817 mutex_unlock(&pf->switch_mutex);
7822 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: pointer to the VSI being cleaned
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 */
7829 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
7831 /* free the ring and vector containers */
7832 if (free_qvectors) {
7833 kfree(vsi->q_vectors);
7834 vsi->q_vectors = NULL;
7836 kfree(vsi->tx_rings);
7837 vsi->tx_rings = NULL;
7838 vsi->rx_rings = NULL;
7839 vsi->xdp_rings = NULL;
7843 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
7845 * @vsi: Pointer to VSI structure
7847 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
7852 kfree(vsi->rss_hkey_user);
7853 vsi->rss_hkey_user = NULL;
7855 kfree(vsi->rss_lut_user);
7856 vsi->rss_lut_user = NULL;
7860 * i40e_vsi_clear - Deallocate the VSI provided
7861 * @vsi: the VSI being un-configured
7863 static int i40e_vsi_clear(struct i40e_vsi *vsi)
7874 mutex_lock(&pf->switch_mutex);
7875 if (!pf->vsi[vsi->idx]) {
7876 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
7877 vsi->idx, vsi->idx, vsi, vsi->type);
7881 if (pf->vsi[vsi->idx] != vsi) {
7882 dev_err(&pf->pdev->dev,
7883 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
7884 pf->vsi[vsi->idx]->idx,
7886 pf->vsi[vsi->idx]->type,
7887 vsi->idx, vsi, vsi->type);
7891 /* updates the PF for this cleared vsi */
7892 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7893 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
7895 i40e_vsi_free_arrays(vsi, true);
7896 i40e_clear_rss_config_user(vsi);
7898 pf->vsi[vsi->idx] = NULL;
7899 if (vsi->idx < pf->next_vsi)
7900 pf->next_vsi = vsi->idx;
7903 mutex_unlock(&pf->switch_mutex);
7911 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7912 * @vsi: the VSI being cleaned
7914 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7918 if (vsi->tx_rings && vsi->tx_rings[0]) {
7919 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7920 kfree_rcu(vsi->tx_rings[i], rcu);
7921 vsi->tx_rings[i] = NULL;
7922 vsi->rx_rings[i] = NULL;
7924 vsi->xdp_rings[i] = NULL;
7930 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
7931 * @vsi: the VSI being configured
7933 static int i40e_alloc_rings(struct i40e_vsi *vsi)
7935 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
7936 struct i40e_pf *pf = vsi->back;
7937 struct i40e_ring *ring;
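	/* Each queue pair draws its qpv ring structs (Tx, optional XDP Tx,
	 * then Rx) from one kcalloc() below, so freeing the Tx ring pointer
	 * later releases the whole group at once.
	 */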
7939 /* Set basic values in the rings to be used later during open() */
7940 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7941 /* allocate space for both Tx and Rx in one shot */
7942 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
7946 ring->queue_index = i;
7947 ring->reg_idx = vsi->base_queue + i;
7948 ring->ring_active = false;
7950 ring->netdev = vsi->netdev;
7951 ring->dev = &pf->pdev->dev;
7952 ring->count = vsi->num_desc;
7955 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
7956 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7957 ring->tx_itr_setting = pf->tx_itr_default;
7958 vsi->tx_rings[i] = ring++;
7960 if (!i40e_enabled_xdp_vsi(vsi))
7963 ring->queue_index = vsi->alloc_queue_pairs + i;
7964 ring->reg_idx = vsi->base_queue + ring->queue_index;
7965 ring->ring_active = false;
7967 ring->netdev = NULL;
7968 ring->dev = &pf->pdev->dev;
7969 ring->count = vsi->num_desc;
7972 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
7973 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7975 ring->tx_itr_setting = pf->tx_itr_default;
7976 vsi->xdp_rings[i] = ring++;
7979 ring->queue_index = i;
7980 ring->reg_idx = vsi->base_queue + i;
7981 ring->ring_active = false;
7983 ring->netdev = vsi->netdev;
7984 ring->dev = &pf->pdev->dev;
7985 ring->count = vsi->num_desc;
7988 ring->rx_itr_setting = pf->rx_itr_default;
7989 vsi->rx_rings[i] = ring;
7995 i40e_vsi_clear_rings(vsi);
8000 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
8001 * @pf: board private structure
8002 * @vectors: the number of MSI-X vectors to request
8004 * Returns the number of vectors reserved, or error
8006 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
8008 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
8009 I40E_MIN_MSIX, vectors);
8011 dev_info(&pf->pdev->dev,
8012 "MSI-X vector reservation failed: %d\n", vectors);
8020 * i40e_init_msix - Setup the MSIX capability
8021 * @pf: board private structure
8023 * Work with the OS to set up the MSIX vectors needed.
8025 * Returns the number of vectors reserved or negative on failure
8027 static int i40e_init_msix(struct i40e_pf *pf)
8029 struct i40e_hw *hw = &pf->hw;
8030 int cpus, extra_vectors;
8034 int iwarp_requested = 0;
8036 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
8039 /* The number of vectors we'll request will be comprised of:
8040 * - Add 1 for "other" cause for Admin Queue events, etc.
8041 * - The number of LAN queue pairs
 * - Queues being used for RSS.
 *   We don't need as many as max_rss_size vectors;
 *   we use rss_size instead in the calculation, since that
 *   is governed by the number of CPUs in the system.
8046 * - assumes symmetric Tx/Rx pairing
8047 * - The number of VMDq pairs
8048 * - The CPU count within the NUMA node if iWARP is enabled
8049 * Once we count this up, try the request.
8051 * If we can't get what we want, we'll simplify to nearly nothing
8052 * and try again. If that still fails, we punt.
8054 vectors_left = hw->func_caps.num_msix_vectors;
8057 /* reserve one vector for miscellaneous handler */
8063 /* reserve some vectors for the main PF traffic queues. Initially we
8064 * only reserve at most 50% of the available vectors, in the case that
8065 * the number of online CPUs is large. This ensures that we can enable
8066 * extra features as well. Once we've enabled the other features, we
8067 * will use any remaining vectors to reach as close as we can to the
8068 * number of online CPUs.
8070 cpus = num_online_cpus();
8071 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
8072 vectors_left -= pf->num_lan_msix;
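	/* Illustrative budget, assuming 16 online CPUs and 65 HW vectors:
	 * one vector goes to misc, leaving 64; LAN initially takes
	 * min(16, 64 / 2) = 16, and the remaining 48 are offered to FD-SB,
	 * iWARP and VMDq before any leftovers are returned to LAN below.
	 */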
8074 /* reserve one vector for sideband flow director */
8075 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8077 pf->num_fdsb_msix = 1;
8081 pf->num_fdsb_msix = 0;
8085 /* can we reserve enough for iWARP? */
8086 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
8087 iwarp_requested = pf->num_iwarp_msix;
8090 pf->num_iwarp_msix = 0;
8091 else if (vectors_left < pf->num_iwarp_msix)
8092 pf->num_iwarp_msix = 1;
8093 v_budget += pf->num_iwarp_msix;
8094 vectors_left -= pf->num_iwarp_msix;
8097 /* any vectors left over go for VMDq support */
8098 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
8099 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
8100 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
8102 if (!vectors_left) {
8103 pf->num_vmdq_msix = 0;
8104 pf->num_vmdq_qps = 0;
8106 /* if we're short on vectors for what's desired, we limit
8107 * the queues per vmdq. If this is still more than are
8108 * available, the user will need to change the number of
		 * queues/vectors used by the PF later with the ethtool
		 * channels command.
		 */
8112 if (vmdq_vecs < vmdq_vecs_wanted)
8113 pf->num_vmdq_qps = 1;
8114 pf->num_vmdq_msix = pf->num_vmdq_qps;
8116 v_budget += vmdq_vecs;
8117 vectors_left -= vmdq_vecs;
8121 /* On systems with a large number of SMP cores, we previously limited
8122 * the number of vectors for num_lan_msix to be at most 50% of the
8123 * available vectors, to allow for other features. Now, we add back
8124 * the remaining vectors. However, we ensure that the total
8125 * num_lan_msix will not exceed num_online_cpus(). To do this, we
8126 * calculate the number of vectors we can add without going over the
	 * cap of CPUs. For systems with a small number of CPUs this will be
	 * zero.
	 */
8130 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
8131 pf->num_lan_msix += extra_vectors;
8132 vectors_left -= extra_vectors;
8134 WARN(vectors_left < 0,
8135 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
8137 v_budget += pf->num_lan_msix;
8138 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
8140 if (!pf->msix_entries)
8143 for (i = 0; i < v_budget; i++)
8144 pf->msix_entries[i].entry = i;
8145 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
8147 if (v_actual < I40E_MIN_MSIX) {
8148 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
8149 kfree(pf->msix_entries);
8150 pf->msix_entries = NULL;
8151 pci_disable_msix(pf->pdev);
8154 } else if (v_actual == I40E_MIN_MSIX) {
8155 /* Adjust for minimal MSIX use */
8156 pf->num_vmdq_vsis = 0;
8157 pf->num_vmdq_qps = 0;
8158 pf->num_lan_qps = 1;
8159 pf->num_lan_msix = 1;
8161 } else if (!vectors_left) {
8162 /* If we have limited resources, we will start with no vectors
8163 * for the special features and then allocate vectors to some
8164 * of these features based on the policy and at the end disable
8165 * the features that did not get any vectors.
8169 dev_info(&pf->pdev->dev,
8170 "MSI-X vector limit reached, attempting to redistribute vectors\n");
8171 /* reserve the misc vector */
8174 /* Scale vector usage down */
8175 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
8176 pf->num_vmdq_vsis = 1;
8177 pf->num_vmdq_qps = 1;
8179 /* partition out the remaining vectors */
8182 pf->num_lan_msix = 1;
8185 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
8186 pf->num_lan_msix = 1;
8187 pf->num_iwarp_msix = 1;
8189 pf->num_lan_msix = 2;
8193 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
8194 pf->num_iwarp_msix = min_t(int, (vec / 3),
8196 pf->num_vmdq_vsis = min_t(int, (vec / 3),
8197 I40E_DEFAULT_NUM_VMDQ_VSI);
8199 pf->num_vmdq_vsis = min_t(int, (vec / 2),
8200 I40E_DEFAULT_NUM_VMDQ_VSI);
8202 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8203 pf->num_fdsb_msix = 1;
8206 pf->num_lan_msix = min_t(int,
8207 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
8209 pf->num_lan_qps = pf->num_lan_msix;
8214 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
8215 (pf->num_fdsb_msix == 0)) {
8216 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
8217 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8219 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
8220 (pf->num_vmdq_msix == 0)) {
8221 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
8222 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
8225 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
8226 (pf->num_iwarp_msix == 0)) {
8227 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
8228 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
8230 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
8231 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
8233 pf->num_vmdq_msix * pf->num_vmdq_vsis,
8235 pf->num_iwarp_msix);
8241 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
8242 * @vsi: the VSI being configured
8243 * @v_idx: index of the vector in the vsi struct
8244 * @cpu: cpu to be used on affinity_mask
8246 * We allocate one q_vector. If allocation fails we return -ENOMEM.
8248 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
8250 struct i40e_q_vector *q_vector;
8252 /* allocate q_vector */
8253 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
8257 q_vector->vsi = vsi;
8258 q_vector->v_idx = v_idx;
8259 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
8262 netif_napi_add(vsi->netdev, &q_vector->napi,
8263 i40e_napi_poll, NAPI_POLL_WEIGHT);
8265 q_vector->rx.latency_range = I40E_LOW_LATENCY;
8266 q_vector->tx.latency_range = I40E_LOW_LATENCY;
8268 /* tie q_vector and vsi together */
8269 vsi->q_vectors[v_idx] = q_vector;
8275 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
8276 * @vsi: the VSI being configured
8278 * We allocate one q_vector per queue interrupt. If allocation fails we
8281 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
8283 struct i40e_pf *pf = vsi->back;
8284 int err, v_idx, num_q_vectors, current_cpu;
8286 /* if not MSIX, give the one vector only to the LAN VSI */
8287 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
8288 num_q_vectors = vsi->num_q_vectors;
8289 else if (vsi == pf->vsi[pf->lan_vsi])
8294 current_cpu = cpumask_first(cpu_online_mask);
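	/* Hand out initial affinity hints round-robin across the online
	 * CPUs so queue interrupts start out evenly spread.
	 */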
8296 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
8297 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
8300 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
8301 if (unlikely(current_cpu >= nr_cpu_ids))
8302 current_cpu = cpumask_first(cpu_online_mask);
8309 i40e_free_q_vector(vsi, v_idx);
8315 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
8316 * @pf: board private structure to initialize
8318 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
8323 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
8324 vectors = i40e_init_msix(pf);
8326 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
8327 I40E_FLAG_IWARP_ENABLED |
8328 I40E_FLAG_RSS_ENABLED |
8329 I40E_FLAG_DCB_CAPABLE |
8330 I40E_FLAG_DCB_ENABLED |
8331 I40E_FLAG_SRIOV_ENABLED |
8332 I40E_FLAG_FD_SB_ENABLED |
8333 I40E_FLAG_FD_ATR_ENABLED |
8334 I40E_FLAG_VMDQ_ENABLED);
8336 /* rework the queue expectations without MSIX */
8337 i40e_determine_queue_usage(pf);
8341 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
8342 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
8343 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
8344 vectors = pci_enable_msi(pf->pdev);
8346 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
8348 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
8350 vectors = 1; /* one MSI or Legacy vector */
8353 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
8354 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
8356 /* set up vector assignment tracking */
8357 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
8358 pf->irq_pile = kzalloc(size, GFP_KERNEL);
8359 if (!pf->irq_pile) {
8360 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
8363 pf->irq_pile->num_entries = vectors;
8364 pf->irq_pile->search_hint = 0;
8366 /* track first vector for misc interrupts, ignore return */
8367 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
8374 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
8375 * @pf: private board data structure
8377 * Restore the interrupt scheme that was cleared when we suspended the
8378 * device. This should be called during resume to re-allocate the q_vectors
8379 * and reacquire IRQs.
8381 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
	/* We cleared the MSI and MSI-X flags when disabling the old interrupt
	 * scheme. We need to re-enable them here in order to attempt to
	 * re-acquire the MSI or MSI-X vectors.
8389 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
8391 err = i40e_init_interrupt_scheme(pf);
8395 /* Now that we've re-acquired IRQs, we need to remap the vectors and
8396 * rings together again.
8398 for (i = 0; i < pf->num_alloc_vsi; i++) {
8400 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
8403 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
8407 err = i40e_setup_misc_vector(pf);
8416 i40e_vsi_free_q_vectors(pf->vsi[i]);
8421 #endif /* CONFIG_PM */
8424 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
8425 * @pf: board private structure
8427 * This sets up the handler for MSIX 0, which is used to manage the
8428 * non-queue interrupts, e.g. AdminQ and errors. This is not used
8429 * when in MSI or Legacy interrupt mode.
8431 static int i40e_setup_misc_vector(struct i40e_pf *pf)
8433 struct i40e_hw *hw = &pf->hw;
8436 /* Only request the IRQ once, the first time through. */
8437 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
8438 err = request_irq(pf->msix_entries[0].vector,
8439 i40e_intr, 0, pf->int_name, pf);
8441 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
8442 dev_info(&pf->pdev->dev,
8443 "request_irq for %s failed: %d\n",
8449 i40e_enable_misc_int_causes(pf);
8451 /* associate no queues to the misc vector */
8452 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
8453 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
8457 i40e_irq_dynamic_enable_icr0(pf, true);
8463 * i40e_config_rss_aq - Prepare for RSS using AQ commands
8464 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to the lookup table entries
 * @lut_size: size of the lookup table
8467 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
8468 u8 *lut, u16 lut_size)
8470 struct i40e_pf *pf = vsi->back;
8471 struct i40e_hw *hw = &pf->hw;
8475 struct i40e_aqc_get_set_rss_key_data *seed_dw =
8476 (struct i40e_aqc_get_set_rss_key_data *)seed;
8477 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
8479 dev_info(&pf->pdev->dev,
8480 "Cannot set RSS key, err %s aq_err %s\n",
8481 i40e_stat_str(hw, ret),
8482 i40e_aq_str(hw, hw->aq.asq_last_status));
		bool pf_lut = (vsi->type == I40E_VSI_MAIN);
8489 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8491 dev_info(&pf->pdev->dev,
8492 "Cannot set RSS lut, err %s aq_err %s\n",
8493 i40e_stat_str(hw, ret),
8494 i40e_aq_str(hw, hw->aq.asq_last_status));
8502 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
8503 * @vsi: Pointer to vsi structure
8504 * @seed: Buffer to store the hash keys
8505 * @lut: Buffer to store the lookup table entries
8506 * @lut_size: Size of buffer to store the lookup table entries
8508 * Return 0 on success, negative on failure
8510 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
8511 u8 *lut, u16 lut_size)
8513 struct i40e_pf *pf = vsi->back;
8514 struct i40e_hw *hw = &pf->hw;
8518 ret = i40e_aq_get_rss_key(hw, vsi->id,
8519 (struct i40e_aqc_get_set_rss_key_data *)seed);
8521 dev_info(&pf->pdev->dev,
8522 "Cannot get RSS key, err %s aq_err %s\n",
8523 i40e_stat_str(&pf->hw, ret),
8524 i40e_aq_str(&pf->hw,
8525 pf->hw.aq.asq_last_status));
8531 bool pf_lut = vsi->type == I40E_VSI_MAIN;
8533 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8535 dev_info(&pf->pdev->dev,
8536 "Cannot get RSS lut, err %s aq_err %s\n",
8537 i40e_stat_str(&pf->hw, ret),
8538 i40e_aq_str(&pf->hw,
8539 pf->hw.aq.asq_last_status));
8548 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
8549 * @vsi: VSI structure
8551 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
8553 u8 seed[I40E_HKEY_ARRAY_SIZE];
8554 struct i40e_pf *pf = vsi->back;
8558 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
8562 vsi->rss_size = min_t(int, pf->alloc_rss_size,
8563 vsi->num_queue_pairs);
8567 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8570 /* Use the user configured hash keys and lookup table if there is one,
8571 * otherwise use default
8573 if (vsi->rss_lut_user)
8574 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8576 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8577 if (vsi->rss_hkey_user)
8578 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8580 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8581 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
8588 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
8589 * @vsi: Pointer to vsi structure
8590 * @seed: RSS hash seed
8591 * @lut: Lookup table
8592 * @lut_size: Lookup table size
8594 * Returns 0 on success, negative on failure
8596 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
8597 const u8 *lut, u16 lut_size)
8599 struct i40e_pf *pf = vsi->back;
8600 struct i40e_hw *hw = &pf->hw;
8601 u16 vf_id = vsi->vf_id;
8604 /* Fill out hash function seed */
8606 u32 *seed_dw = (u32 *)seed;
8608 if (vsi->type == I40E_VSI_MAIN) {
8609 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8610 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
8611 } else if (vsi->type == I40E_VSI_SRIOV) {
8612 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
8613 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
8615 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
8620 u32 *lut_dw = (u32 *)lut;
8622 if (vsi->type == I40E_VSI_MAIN) {
8623 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8625 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8626 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
8627 } else if (vsi->type == I40E_VSI_SRIOV) {
8628 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
8630 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
8631 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
8633 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
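/* Each 32-bit HLUT register packs four one-byte LUT entries, which is
 * why the writes above cast the buffer to u32 and why lut_size must
 * match the exact array size: lut[0..3] land in HLUT(0), lut[4..7] in
 * HLUT(1), and so on.
 */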
8642 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
8643 * @vsi: Pointer to VSI structure
8644 * @seed: Buffer to store the keys
8645 * @lut: Buffer to store the lookup table entries
8646 * @lut_size: Size of buffer to store the lookup table entries
8648 * Returns 0 on success, negative on failure
8650 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
8651 u8 *lut, u16 lut_size)
8653 struct i40e_pf *pf = vsi->back;
8654 struct i40e_hw *hw = &pf->hw;
8658 u32 *seed_dw = (u32 *)seed;
8660 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8661 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
8664 u32 *lut_dw = (u32 *)lut;
8666 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8668 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8669 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
8676 * i40e_config_rss - Configure RSS keys and lut
8677 * @vsi: Pointer to VSI structure
8678 * @seed: RSS hash seed
8679 * @lut: Lookup table
8680 * @lut_size: Lookup table size
8682 * Returns 0 on success, negative on failure
8684 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8686 struct i40e_pf *pf = vsi->back;
8688 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
8689 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
8691 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
8695 * i40e_get_rss - Get RSS keys and lut
8696 * @vsi: Pointer to VSI structure
8697 * @seed: Buffer to store the keys
8698 * @lut: Buffer to store the lookup table entries
8699 * @lut_size: Size of buffer to store the lookup table entries
8701 * Returns 0 on success, negative on failure
8703 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8705 struct i40e_pf *pf = vsi->back;
8707 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
8708 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
8710 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
8714 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
8715 * @pf: Pointer to board private structure
8716 * @lut: Lookup table
8717 * @rss_table_size: Lookup table size
8718 * @rss_size: Range of queue number for hashing
8720 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
8721 u16 rss_table_size, u16 rss_size)
8725 for (i = 0; i < rss_table_size; i++)
8726 lut[i] = i % rss_size;
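/* A quick worked example: with rss_table_size = 128 and rss_size = 4
 * the LUT becomes 0 1 2 3 0 1 2 3 ..., spreading received flows
 * round-robin across the first four queues of the VSI.
 */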
8730 * i40e_pf_config_rss - Prepare for RSS if used
8731 * @pf: board private structure
8733 static int i40e_pf_config_rss(struct i40e_pf *pf)
8735 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8736 u8 seed[I40E_HKEY_ARRAY_SIZE];
8738 struct i40e_hw *hw = &pf->hw;
8743 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
8744 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
8745 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
8746 hena |= i40e_pf_get_default_rss_hena(pf);
8748 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
8749 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
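/* hena is a 64-bit enable mask split across two 32-bit registers:
 * PFQF_HENA(0) takes the low dword and PFQF_HENA(1) the high dword,
 * hence the (u32) casts and the >> 32 above.
 */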
8751 /* Determine the RSS table size based on the hardware capabilities */
8752 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
8753 reg_val = (pf->rss_table_size == 512) ?
8754 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
8755 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
8756 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
8758 /* Determine the RSS size of the VSI */
8759 if (!vsi->rss_size) {
8762 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
8763 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
8768 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8772 /* Use user configured lut if there is one, otherwise use default */
8773 if (vsi->rss_lut_user)
8774 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8776 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8778 /* Use user configured hash key if there is one, otherwise
8781 if (vsi->rss_hkey_user)
8782 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8784 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8785 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
8792 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
8793 * @pf: board private structure
8794 * @queue_count: the requested queue count for rss.
8796 * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
8797 * count, which may differ from the requested queue count.
8798 * Note: expects to be called while under rtnl_lock()
8800 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
8802 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8805 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
8808 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
8810 if (queue_count != vsi->num_queue_pairs) {
8813 vsi->req_queue_pairs = queue_count;
8814 i40e_prep_for_reset(pf, true);
8816 pf->alloc_rss_size = new_rss_size;
8818 i40e_reset_and_rebuild(pf, true, true);
8820 /* Discard the user configured hash keys and lut, if less
8821 * queues are enabled.
8823 if (queue_count < vsi->rss_size) {
8824 i40e_clear_rss_config_user(vsi);
8825 dev_dbg(&pf->pdev->dev,
8826 "discard user configured hash keys and lut\n");
8829 /* Reset vsi->rss_size, as number of enabled queues changed */
8830 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
8831 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
8833 i40e_pf_config_rss(pf);
8835 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
8836 vsi->req_queue_pairs, pf->rss_size_max);
8837 return pf->alloc_rss_size;
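/* This is typically reached from the ethtool set_channels path (e.g.
 * "ethtool -L <iface> combined 8"), which already holds rtnl_lock()
 * as the note above requires.
 */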
8841 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
8842 * @pf: board private structure
8844 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
8847 bool min_valid, max_valid;
8850 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
8851 &min_valid, &max_valid);
8855 pf->min_bw = min_bw;
8857 pf->max_bw = max_bw;
8864 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
8865 * @pf: board private structure
8867 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
8869 struct i40e_aqc_configure_partition_bw_data bw_data;
8872 /* Set the valid bit for this PF */
8873 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
8874 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
8875 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
8877 /* Set the new bandwidths */
8878 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
8884 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
8885 * @pf: board private structure
8887 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
8889 /* Commit temporary BW setting to permanent NVM image */
8890 enum i40e_admin_queue_err last_aq_status;
8894 if (pf->hw.partition_id != 1) {
8895 dev_info(&pf->pdev->dev,
8896 "Commit BW only works on partition 1! This is partition %d",
8897 pf->hw.partition_id);
8898 ret = I40E_NOT_SUPPORTED;
8902 /* Acquire NVM for read access */
8903 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
8904 last_aq_status = pf->hw.aq.asq_last_status;
8906 dev_info(&pf->pdev->dev,
8907 "Cannot acquire NVM for read access, err %s aq_err %s\n",
8908 i40e_stat_str(&pf->hw, ret),
8909 i40e_aq_str(&pf->hw, last_aq_status));
8913 /* Read word 0x10 of NVM - SW compatibility word 1 */
8914 ret = i40e_aq_read_nvm(&pf->hw,
8915 I40E_SR_NVM_CONTROL_WORD,
8916 0x10, sizeof(nvm_word), &nvm_word,
8918 /* Save off last admin queue command status before releasing the NVM */
8921 last_aq_status = pf->hw.aq.asq_last_status;
8922 i40e_release_nvm(&pf->hw);
8924 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
8925 i40e_stat_str(&pf->hw, ret),
8926 i40e_aq_str(&pf->hw, last_aq_status));
8930 /* Wait a bit for NVM release to complete */
8933 /* Acquire NVM for write access */
8934 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
8935 last_aq_status = pf->hw.aq.asq_last_status;
8937 dev_info(&pf->pdev->dev,
8938 "Cannot acquire NVM for write access, err %s aq_err %s\n",
8939 i40e_stat_str(&pf->hw, ret),
8940 i40e_aq_str(&pf->hw, last_aq_status));
8943 /* Write it back out unchanged to initiate update NVM,
8944 * which will force a write of the shadow (alt) RAM to
8945 * the NVM - thus storing the bandwidth values permanently.
8947 ret = i40e_aq_update_nvm(&pf->hw,
8948 I40E_SR_NVM_CONTROL_WORD,
8949 0x10, sizeof(nvm_word),
8950 &nvm_word, true, NULL);
8951 /* Save off last admin queue command status before releasing the NVM */
8954 last_aq_status = pf->hw.aq.asq_last_status;
8955 i40e_release_nvm(&pf->hw);
8957 dev_info(&pf->pdev->dev,
8958 "BW settings NOT SAVED, err %s aq_err %s\n",
8959 i40e_stat_str(&pf->hw, ret),
8960 i40e_aq_str(&pf->hw, last_aq_status));
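/* The flow above is: acquire NVM (read) -> read control word 0x10 ->
 * release -> acquire NVM (write) -> write the same word back ->
 * release. The unchanged write is only a trigger: it forces the
 * shadow (alt) RAM holding the temporary BW values to be committed
 * into the NVM.
 */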
8967 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
8968 * @pf: board private structure to initialize
8970 * i40e_sw_init initializes the Adapter private data structure.
8971 * Fields are initialized based on PCI device information and
8972 * OS network device settings (MTU size).
8974 static int i40e_sw_init(struct i40e_pf *pf)
8979 /* Set default capability flags */
8980 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
8981 I40E_FLAG_MSI_ENABLED |
8982 I40E_FLAG_MSIX_ENABLED;
8984 /* Set default ITR */
8985 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
8986 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
8988 /* Depending on PF configurations, it is possible that the RSS
8989 * maximum might end up larger than the available queues
8991 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
8992 pf->alloc_rss_size = 1;
8993 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
8994 pf->rss_size_max = min_t(int, pf->rss_size_max,
8995 pf->hw.func_caps.num_tx_qp);
8996 if (pf->hw.func_caps.rss) {
8997 pf->flags |= I40E_FLAG_RSS_ENABLED;
8998 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
9002 /* MFP mode enabled */
9003 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
9004 pf->flags |= I40E_FLAG_MFP_ENABLED;
9005 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
9006 if (i40e_get_partition_bw_setting(pf)) {
9007 dev_warn(&pf->pdev->dev,
9008 "Could not get partition bw settings\n");
9010 dev_info(&pf->pdev->dev,
9011 "Partition BW Min = %8.8x, Max = %8.8x\n",
9012 pf->min_bw, pf->max_bw);
9014 /* nudge the Tx scheduler */
9015 i40e_set_partition_bw_setting(pf);
9019 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
9020 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
9021 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
9022 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
9023 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
9024 pf->hw.num_partitions > 1)
9025 dev_info(&pf->pdev->dev,
9026 "Flow Director Sideband mode Disabled in MFP mode\n");
9028 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
9029 pf->fdir_pf_filter_count =
9030 pf->hw.func_caps.fd_filters_guaranteed;
9031 pf->hw.fdir_shared_filter_count =
9032 pf->hw.func_caps.fd_filters_best_effort;
9035 if (pf->hw.mac.type == I40E_MAC_X722) {
9036 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
9037 I40E_HW_128_QP_RSS_CAPABLE |
9038 I40E_HW_ATR_EVICT_CAPABLE |
9039 I40E_HW_WB_ON_ITR_CAPABLE |
9040 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
9041 I40E_HW_NO_PCI_LINK_CHECK |
9042 I40E_HW_USE_SET_LLDP_MIB |
9043 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
9044 I40E_HW_PTP_L4_CAPABLE |
9045 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
9046 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
9048 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
9049 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
9050 I40E_FDEVICT_PCTYPE_DEFAULT) {
9051 dev_warn(&pf->pdev->dev,
9052 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
9053 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
9055 } else if ((pf->hw.aq.api_maj_ver > 1) ||
9056 ((pf->hw.aq.api_maj_ver == 1) &&
9057 (pf->hw.aq.api_min_ver > 4))) {
9058 /* Supported in FW API version higher than 1.4 */
9059 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
9062 /* Enable HW ATR eviction if possible */
9063 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
9064 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
9066 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
9067 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
9068 (pf->hw.aq.fw_maj_ver < 4))) {
9069 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
9070 /* No DCB support for FW < v4.33 */
9071 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
9074 /* Disable FW LLDP if FW < v4.3 */
9075 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
9076 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
9077 (pf->hw.aq.fw_maj_ver < 4)))
9078 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
9080 /* Use the FW Set LLDP MIB API if FW >= v4.40 */
9081 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
9082 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
9083 (pf->hw.aq.fw_maj_ver >= 5)))
9084 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
9086 /* Enable PTP L4 if FW >= v6.0 */
9087 if (pf->hw.mac.type == I40E_MAC_XL710 &&
9088 pf->hw.aq.fw_maj_ver >= 6)
9089 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
9091 if (pf->hw.func_caps.vmdq) {
9092 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
9093 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
9094 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
9097 if (pf->hw.func_caps.iwarp) {
9098 pf->flags |= I40E_FLAG_IWARP_ENABLED;
9099 /* iWARP needs one extra vector for CQP, just like MISC. */
9100 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
9103 #ifdef CONFIG_PCI_IOV
9104 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
9105 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
9106 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
9107 pf->num_req_vfs = min_t(int,
9108 pf->hw.func_caps.num_vfs,
9111 #endif /* CONFIG_PCI_IOV */
9112 pf->eeprom_version = 0xDEAD;
9113 pf->lan_veb = I40E_NO_VEB;
9114 pf->lan_vsi = I40E_NO_VSI;
9116 /* By default FW has this off for performance reasons */
9117 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
9119 /* set up queue assignment tracking */
9120 size = sizeof(struct i40e_lump_tracking)
9121 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
9122 pf->qp_pile = kzalloc(size, GFP_KERNEL);
9127 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
9128 pf->qp_pile->search_hint = 0;
9130 pf->tx_timeout_recovery_level = 1;
9132 mutex_init(&pf->switch_mutex);
9139 * i40e_set_ntuple - set the ntuple feature flag and take action
9140 * @pf: board private structure to initialize
9141 * @features: the feature set that the stack is suggesting
9143 * returns a bool to indicate if reset needs to happen
9145 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
9147 bool need_reset = false;
9149 /* Check if Flow Director n-tuple support was enabled or disabled. If
9150 * the state changed, we need to reset.
9152 if (features & NETIF_F_NTUPLE) {
9153 /* Enable filters and mark for reset */
9154 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
9156 /* enable FD_SB only if there is an MSI-X vector */
9157 if (pf->num_fdsb_msix > 0)
9158 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
9160 /* turn off filters, mark for reset and clear SW filter list */
9161 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9163 i40e_fdir_filter_exit(pf);
9165 pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
9166 I40E_FLAG_FD_SB_AUTO_DISABLED);
9167 /* reset fd counters */
9170 /* if ATR was auto disabled it can be re-enabled. */
9171 if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
9172 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
9173 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
9174 (I40E_DEBUG_FD & pf->hw.debug_mask))
9175 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
9182 * i40e_clear_rss_lut - clear the rx hash lookup table
9183 * @vsi: the VSI being configured
9185 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
9187 struct i40e_pf *pf = vsi->back;
9188 struct i40e_hw *hw = &pf->hw;
9189 u16 vf_id = vsi->vf_id;
9192 if (vsi->type == I40E_VSI_MAIN) {
9193 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
9194 wr32(hw, I40E_PFQF_HLUT(i), 0);
9195 } else if (vsi->type == I40E_VSI_SRIOV) {
9196 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
9197 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
9199 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
9204 * i40e_set_features - set the netdev feature flags
9205 * @netdev: ptr to the netdev being adjusted
9206 * @features: the feature set that the stack is suggesting
9207 * Note: expects to be called while under rtnl_lock()
9209 static int i40e_set_features(struct net_device *netdev,
9210 netdev_features_t features)
9212 struct i40e_netdev_priv *np = netdev_priv(netdev);
9213 struct i40e_vsi *vsi = np->vsi;
9214 struct i40e_pf *pf = vsi->back;
9217 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
9218 i40e_pf_config_rss(pf);
9219 else if (!(features & NETIF_F_RXHASH) &&
9220 netdev->features & NETIF_F_RXHASH)
9221 i40e_clear_rss_lut(vsi);
9223 if (features & NETIF_F_HW_VLAN_CTAG_RX)
9224 i40e_vlan_stripping_enable(vsi);
9226 i40e_vlan_stripping_disable(vsi);
9228 need_reset = i40e_set_ntuple(pf, features);
9231 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
9237 * i40e_get_udp_port_idx - Look up a UDP port that may be offloaded for Rx
9238 * @pf: board private structure
9239 * @port: The UDP port to look up
9241 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
9243 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
9247 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9248 if (pf->udp_ports[i].port == port)
9256 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
9257 * @netdev: This physical port's netdev
9258 * @ti: Tunnel endpoint information
9260 static void i40e_udp_tunnel_add(struct net_device *netdev,
9261 struct udp_tunnel_info *ti)
9263 struct i40e_netdev_priv *np = netdev_priv(netdev);
9264 struct i40e_vsi *vsi = np->vsi;
9265 struct i40e_pf *pf = vsi->back;
9266 u16 port = ntohs(ti->port);
9270 idx = i40e_get_udp_port_idx(pf, port);
9272 /* Check if port already exists */
9273 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
9274 netdev_info(netdev, "port %d already offloaded\n", port);
9278 /* Now check if there is space to add the new port */
9279 next_idx = i40e_get_udp_port_idx(pf, 0);
9281 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
9282 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
9288 case UDP_TUNNEL_TYPE_VXLAN:
9289 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
9291 case UDP_TUNNEL_TYPE_GENEVE:
9292 if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
9294 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
9300 /* New port: add it and mark its index in the bitmap */
9301 pf->udp_ports[next_idx].port = port;
9302 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
9303 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
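/* Note that adding a port does not touch the AdminQ here: the port is
 * staged in udp_ports[], its index is marked in pending_udp_bitmap and
 * I40E_FLAG_UDP_FILTER_SYNC is raised so that the periodic service
 * task pushes the change to the firmware later.
 */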
9307 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
9308 * @netdev: This physical port's netdev
9309 * @ti: Tunnel endpoint information
9311 static void i40e_udp_tunnel_del(struct net_device *netdev,
9312 struct udp_tunnel_info *ti)
9314 struct i40e_netdev_priv *np = netdev_priv(netdev);
9315 struct i40e_vsi *vsi = np->vsi;
9316 struct i40e_pf *pf = vsi->back;
9317 u16 port = ntohs(ti->port);
9320 idx = i40e_get_udp_port_idx(pf, port);
9322 /* Check if port already exists */
9323 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
9327 case UDP_TUNNEL_TYPE_VXLAN:
9328 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
9331 case UDP_TUNNEL_TYPE_GENEVE:
9332 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
9339 /* if port exists, set it to 0 (mark for deletion)
9340 * and make it pending
9342 pf->udp_ports[idx].port = 0;
9343 pf->pending_udp_bitmap |= BIT_ULL(idx);
9344 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
9348 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
9352 static int i40e_get_phys_port_id(struct net_device *netdev,
9353 struct netdev_phys_item_id *ppid)
9355 struct i40e_netdev_priv *np = netdev_priv(netdev);
9356 struct i40e_pf *pf = np->vsi->back;
9357 struct i40e_hw *hw = &pf->hw;
9359 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
9362 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
9363 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
9369 * i40e_ndo_fdb_add - add an entry to the hardware database
9370 * @ndm: the input from the stack
9371 * @tb: pointer to array of nladdr (unused)
9372 * @dev: the net device pointer
9373 * @addr: the MAC address entry being added
 * @vid: VLAN ID of the entry being added
9374 * @flags: instructions from stack about fdb operation
9376 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9377 struct net_device *dev,
9378 const unsigned char *addr, u16 vid,
9381 struct i40e_netdev_priv *np = netdev_priv(dev);
9382 struct i40e_pf *pf = np->vsi->back;
9385 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
9389 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
9393 /* Hardware does not support aging addresses so if a
9394 * ndm_state is given only allow permanent addresses
9396 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
9397 netdev_info(dev, "FDB only supports static addresses\n");
9401 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
9402 err = dev_uc_add_excl(dev, addr);
9403 else if (is_multicast_ether_addr(addr))
9404 err = dev_mc_add_excl(dev, addr);
9408 /* Only return duplicate errors if NLM_F_EXCL is set */
9409 if (err == -EEXIST && !(flags & NLM_F_EXCL))
9416 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
9417 * @dev: the netdev being configured
9418 * @nlh: RTNL message
9420 * Inserts a new hardware bridge if not already created and
9421 * enables the bridging mode requested (VEB or VEPA). If the
9422 * hardware bridge has already been inserted and the request
9423 * is to change the mode then that requires a PF reset to
9424 * allow rebuild of the components with required hardware
9425 * bridge mode enabled.
9427 * Note: expects to be called while under rtnl_lock()
9429 static int i40e_ndo_bridge_setlink(struct net_device *dev,
9430 struct nlmsghdr *nlh,
9433 struct i40e_netdev_priv *np = netdev_priv(dev);
9434 struct i40e_vsi *vsi = np->vsi;
9435 struct i40e_pf *pf = vsi->back;
9436 struct i40e_veb *veb = NULL;
9437 struct nlattr *attr, *br_spec;
9440 /* Only for PF VSI for now */
9441 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
9444 /* Find the HW bridge for PF VSI */
9445 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9446 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9450 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9452 nla_for_each_nested(attr, br_spec, rem) {
9455 if (nla_type(attr) != IFLA_BRIDGE_MODE)
9458 mode = nla_get_u16(attr);
9459 if ((mode != BRIDGE_MODE_VEPA) &&
9460 (mode != BRIDGE_MODE_VEB))
9463 /* Insert a new HW bridge */
9465 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
9466 vsi->tc_config.enabled_tc);
9468 veb->bridge_mode = mode;
9469 i40e_config_bridge_mode(veb);
9471 /* No Bridge HW offload available */
9475 } else if (mode != veb->bridge_mode) {
9476 /* Existing HW bridge but different mode needs reset */
9477 veb->bridge_mode = mode;
9478 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
9479 if (mode == BRIDGE_MODE_VEB)
9480 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
9482 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
9483 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED),
9493 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
9496 * @seq: RTNL message seq #
9497 * @dev: the netdev being configured
9498 * @filter_mask: unused
9499 * @nlflags: netlink flags passed in
9501 * Return the mode in which the hardware bridge is operating
9504 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9505 struct net_device *dev,
9506 u32 __always_unused filter_mask,
9509 struct i40e_netdev_priv *np = netdev_priv(dev);
9510 struct i40e_vsi *vsi = np->vsi;
9511 struct i40e_pf *pf = vsi->back;
9512 struct i40e_veb *veb = NULL;
9515 /* Only for PF VSI for now */
9516 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
9519 /* Find the HW bridge for the PF VSI */
9520 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9521 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9528 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
9529 0, 0, nlflags, filter_mask, NULL);
9533 * i40e_features_check - Validate encapsulated packet conforms to limits
9535 * @dev: This physical port's netdev
9536 * @features: Offload features that the stack believes apply
9538 static netdev_features_t i40e_features_check(struct sk_buff *skb,
9539 struct net_device *dev,
9540 netdev_features_t features)
9544 /* No point in doing any of this if neither checksum nor GSO are
9545 * being requested for this frame. We can rule out both by just
9546 * checking for CHECKSUM_PARTIAL
9548 if (skb->ip_summed != CHECKSUM_PARTIAL)
9551 /* We cannot support GSO if the MSS is going to be less than
9552 * 64 bytes. If it is then we need to drop support for GSO.
9554 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
9555 features &= ~NETIF_F_GSO_MASK;
9557 /* MACLEN can support at most 63 words */
9558 len = skb_network_header(skb) - skb->data;
9559 if (len & ~(63 * 2))
9562 /* IPLEN and EIPLEN can support at most 127 dwords */
9563 len = skb_transport_header(skb) - skb_network_header(skb);
9564 if (len & ~(127 * 4))
9567 if (skb->encapsulation) {
9568 /* L4TUNLEN can support 127 words */
9569 len = skb_inner_network_header(skb) - skb_transport_header(skb);
9570 if (len & ~(127 * 2))
9573 /* IPLEN can support at most 127 dwords */
9574 len = skb_inner_transport_header(skb) -
9575 skb_inner_network_header(skb);
9576 if (len & ~(127 * 4))
9580 /* No need to validate L4LEN as TCP is the only protocol with a
9581 * flexible value and we support all possible values supported
9582 * by TCP, which is at most 15 dwords
9587 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
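/* On the masking idiom used above: len & ~(127 * 4) is non-zero when
 * len is not dword-aligned or exceeds 127 dwords (508 bytes), i.e.
 * when it cannot be expressed in a 7-bit field counted in 4-byte
 * units; the 63 * 2 check is the same idea for a 6-bit field counted
 * in 2-byte words.
 */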
9591 * i40e_xdp_setup - add/remove an XDP program
9592 * @vsi: VSI to be changed
9593 * @prog: XDP program
9595 static int i40e_xdp_setup(struct i40e_vsi *vsi,
9596 struct bpf_prog *prog)
9598 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
9599 struct i40e_pf *pf = vsi->back;
9600 struct bpf_prog *old_prog;
9604 /* Don't allow frames that span over multiple buffers */
9605 if (frame_size > vsi->rx_buf_len)
9608 if (!i40e_enabled_xdp_vsi(vsi) && !prog)
9611 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
9612 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
9615 i40e_prep_for_reset(pf, true);
9617 old_prog = xchg(&vsi->xdp_prog, prog);
9620 i40e_reset_and_rebuild(pf, true, true);
9622 for (i = 0; i < vsi->num_queue_pairs; i++)
9623 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
9626 bpf_prog_put(old_prog);
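/* xchg() swaps in the new program atomically; the old program keeps
 * running until every ring has been pointed at the new one (or the
 * rings have been rebuilt), and only then is its reference dropped
 * via bpf_prog_put().
 */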
9632 * i40e_xdp - implements ndo_xdp for i40e
9636 static int i40e_xdp(struct net_device *dev,
9637 struct netdev_xdp *xdp)
9639 struct i40e_netdev_priv *np = netdev_priv(dev);
9640 struct i40e_vsi *vsi = np->vsi;
9642 if (vsi->type != I40E_VSI_MAIN)
9645 switch (xdp->command) {
9646 case XDP_SETUP_PROG:
9647 return i40e_xdp_setup(vsi, xdp->prog);
9648 case XDP_QUERY_PROG:
9649 xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
9650 xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
9657 static const struct net_device_ops i40e_netdev_ops = {
9658 .ndo_open = i40e_open,
9659 .ndo_stop = i40e_close,
9660 .ndo_start_xmit = i40e_lan_xmit_frame,
9661 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
9662 .ndo_set_rx_mode = i40e_set_rx_mode,
9663 .ndo_validate_addr = eth_validate_addr,
9664 .ndo_set_mac_address = i40e_set_mac,
9665 .ndo_change_mtu = i40e_change_mtu,
9666 .ndo_do_ioctl = i40e_ioctl,
9667 .ndo_tx_timeout = i40e_tx_timeout,
9668 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
9669 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
9670 #ifdef CONFIG_NET_POLL_CONTROLLER
9671 .ndo_poll_controller = i40e_netpoll,
9673 .ndo_setup_tc = __i40e_setup_tc,
9674 .ndo_set_features = i40e_set_features,
9675 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
9676 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
9677 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
9678 .ndo_get_vf_config = i40e_ndo_get_vf_config,
9679 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
9680 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
9681 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
9682 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
9683 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
9684 .ndo_get_phys_port_id = i40e_get_phys_port_id,
9685 .ndo_fdb_add = i40e_ndo_fdb_add,
9686 .ndo_features_check = i40e_features_check,
9687 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
9688 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
9689 .ndo_xdp = i40e_xdp,
9693 * i40e_config_netdev - Setup the netdev flags
9694 * @vsi: the VSI being configured
9696 * Returns 0 on success, negative value on failure
9698 static int i40e_config_netdev(struct i40e_vsi *vsi)
9700 struct i40e_pf *pf = vsi->back;
9701 struct i40e_hw *hw = &pf->hw;
9702 struct i40e_netdev_priv *np;
9703 struct net_device *netdev;
9704 u8 broadcast[ETH_ALEN];
9705 u8 mac_addr[ETH_ALEN];
9707 netdev_features_t hw_enc_features;
9708 netdev_features_t hw_features;
9710 etherdev_size = sizeof(struct i40e_netdev_priv);
9711 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
9715 vsi->netdev = netdev;
9716 np = netdev_priv(netdev);
9719 hw_enc_features = NETIF_F_SG |
9723 NETIF_F_SOFT_FEATURES |
9728 NETIF_F_GSO_GRE_CSUM |
9729 NETIF_F_GSO_PARTIAL |
9730 NETIF_F_GSO_UDP_TUNNEL |
9731 NETIF_F_GSO_UDP_TUNNEL_CSUM |
9737 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
9738 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
9740 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
9742 netdev->hw_enc_features |= hw_enc_features;
9744 /* record features VLANs can make use of */
9745 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
9747 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
9748 netdev->hw_features |= NETIF_F_NTUPLE;
9749 hw_features = hw_enc_features |
9750 NETIF_F_HW_VLAN_CTAG_TX |
9751 NETIF_F_HW_VLAN_CTAG_RX;
9753 netdev->hw_features |= hw_features;
9755 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
9756 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
9758 if (vsi->type == I40E_VSI_MAIN) {
9759 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
9760 ether_addr_copy(mac_addr, hw->mac.perm_addr);
9761 /* The following steps are necessary for two reasons. First,
9762 * some older NVM configurations load a default MAC-VLAN
9763 * filter that will accept any tagged packet, and we want to
9764 * replace this with a normal filter. Additionally, it is
9765 * possible our MAC address was provided by the platform using
9766 * Open Firmware or similar.
9768 * Thus, we need to remove the default filter and install one
9769 * specific to the MAC address.
9771 i40e_rm_default_mac_filter(vsi, mac_addr);
9772 spin_lock_bh(&vsi->mac_filter_hash_lock);
9773 i40e_add_mac_filter(vsi, mac_addr);
9774 spin_unlock_bh(&vsi->mac_filter_hash_lock);
9776 /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
9777 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
9778 * the end, which is 4 bytes long, so force truncation of the
9779 * original name by IFNAMSIZ - 4
9781 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
9783 pf->vsi[pf->lan_vsi]->netdev->name);
9784 random_ether_addr(mac_addr);
9786 spin_lock_bh(&vsi->mac_filter_hash_lock);
9787 i40e_add_mac_filter(vsi, mac_addr);
9788 spin_unlock_bh(&vsi->mac_filter_hash_lock);
9791 /* Add the broadcast filter so that we initially will receive
9792 * broadcast packets. Note that when a new VLAN is first added the
9793 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
9794 * specific filters as part of transitioning into "vlan" operation.
9795 * When more VLANs are added, the driver will copy each existing MAC
9796 * filter and add it for the new VLAN.
9798 * Broadcast filters are handled specially by
9799 * i40e_sync_filters_subtask, as the driver must set the broadcast
9800 * promiscuous bit instead of adding this directly as a MAC/VLAN
9801 * filter. The subtask will update the correct broadcast promiscuous
9802 * bits as VLANs become active or inactive.
9804 eth_broadcast_addr(broadcast);
9805 spin_lock_bh(&vsi->mac_filter_hash_lock);
9806 i40e_add_mac_filter(vsi, broadcast);
9807 spin_unlock_bh(&vsi->mac_filter_hash_lock);
9809 ether_addr_copy(netdev->dev_addr, mac_addr);
9810 ether_addr_copy(netdev->perm_addr, mac_addr);
9812 netdev->priv_flags |= IFF_UNICAST_FLT;
9813 netdev->priv_flags |= IFF_SUPP_NOFCS;
9814 /* Setup netdev TC information */
9815 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
9817 netdev->netdev_ops = &i40e_netdev_ops;
9818 netdev->watchdog_timeo = 5 * HZ;
9819 i40e_set_ethtool_ops(netdev);
9821 /* MTU range: 68 - 9706 */
9822 netdev->min_mtu = ETH_MIN_MTU;
9823 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
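/* Assuming I40E_MAX_RXBUFFER is 9728 bytes, subtracting the L2 packet
 * header pad yields the 9706-byte max_mtu noted above, while
 * ETH_MIN_MTU supplies the 68-byte floor.
 */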
9829 * i40e_vsi_delete - Delete a VSI from the switch
9830 * @vsi: the VSI being removed
9834 static void i40e_vsi_delete(struct i40e_vsi *vsi)
9836 /* removing the default VSI is not allowed */
9837 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
9840 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
9844 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
9845 * @vsi: the VSI being queried
9847 * Returns 1 if the HW bridge mode is VEB and 0 in case of VEPA mode
9849 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
9851 struct i40e_veb *veb;
9852 struct i40e_pf *pf = vsi->back;
9854 /* Uplink is not a bridge so default to VEB */
9855 if (vsi->veb_idx == I40E_NO_VEB)
9858 veb = pf->veb[vsi->veb_idx];
9860 dev_info(&pf->pdev->dev,
9861 "There is no veb associated with the bridge\n");
9865 /* Uplink is a bridge in VEPA mode */
9866 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
9869 /* Uplink is a bridge in VEB mode */
9873 /* VEPA is now default bridge, so return 0 */
9878 * i40e_add_vsi - Add a VSI to the switch
9879 * @vsi: the VSI being configured
9881 * This initializes a VSI context depending on the VSI type to be added and
9882 * passes it down to the add_vsi aq command.
9884 static int i40e_add_vsi(struct i40e_vsi *vsi)
9887 struct i40e_pf *pf = vsi->back;
9888 struct i40e_hw *hw = &pf->hw;
9889 struct i40e_vsi_context ctxt;
9890 struct i40e_mac_filter *f;
9891 struct hlist_node *h;
9894 u8 enabled_tc = 0x1; /* TC0 enabled */
9897 memset(&ctxt, 0, sizeof(ctxt));
9898 switch (vsi->type) {
9900 /* The PF's main VSI is already setup as part of the
9901 * device initialization, so we'll not bother with
9902 * the add_vsi call, but we will retrieve the current
9905 ctxt.seid = pf->main_vsi_seid;
9906 ctxt.pf_num = pf->hw.pf_id;
9908 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9909 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9911 dev_info(&pf->pdev->dev,
9912 "couldn't get PF vsi config, err %s aq_err %s\n",
9913 i40e_stat_str(&pf->hw, ret),
9914 i40e_aq_str(&pf->hw,
9915 pf->hw.aq.asq_last_status));
9918 vsi->info = ctxt.info;
9919 vsi->info.valid_sections = 0;
9921 vsi->seid = ctxt.seid;
9922 vsi->id = ctxt.vsi_number;
9924 enabled_tc = i40e_pf_get_tc_map(pf);
9926 /* Source pruning is enabled by default, so the flag is
9927 * negative logic - if it's set, we need to fiddle with
9928 * the VSI to disable source pruning.
9930 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
9931 memset(&ctxt, 0, sizeof(ctxt));
9932 ctxt.seid = pf->main_vsi_seid;
9933 ctxt.pf_num = pf->hw.pf_id;
9935 ctxt.info.valid_sections |=
9936 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9937 ctxt.info.switch_id =
9938 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
9939 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
9941 dev_info(&pf->pdev->dev,
9942 "update vsi failed, err %s aq_err %s\n",
9943 i40e_stat_str(&pf->hw, ret),
9944 i40e_aq_str(&pf->hw,
9945 pf->hw.aq.asq_last_status));
9951 /* MFP mode setup queue map and update VSI */
9952 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
9953 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
9954 memset(&ctxt, 0, sizeof(ctxt));
9955 ctxt.seid = pf->main_vsi_seid;
9956 ctxt.pf_num = pf->hw.pf_id;
9958 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
9959 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
9961 dev_info(&pf->pdev->dev,
9962 "update vsi failed, err %s aq_err %s\n",
9963 i40e_stat_str(&pf->hw, ret),
9964 i40e_aq_str(&pf->hw,
9965 pf->hw.aq.asq_last_status));
9969 /* update the local VSI info queue map */
9970 i40e_vsi_update_queue_map(vsi, &ctxt);
9971 vsi->info.valid_sections = 0;
9973 /* The default/main VSI is only enabled for TC0;
9974 * reconfigure it to enable all TCs that are
9975 * available on the port in SFP mode.
9976 * For MFP case the iSCSI PF would use this
9977 * flow to enable LAN+iSCSI TC.
9979 ret = i40e_vsi_config_tc(vsi, enabled_tc);
9981 /* A single-TC condition is not fatal;
9982 * log a message and continue
9984 dev_info(&pf->pdev->dev,
9985 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
9987 i40e_stat_str(&pf->hw, ret),
9988 i40e_aq_str(&pf->hw,
9989 pf->hw.aq.asq_last_status));
9995 ctxt.pf_num = hw->pf_id;
9997 ctxt.uplink_seid = vsi->uplink_seid;
9998 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9999 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10000 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
10001 (i40e_is_vsi_uplink_mode_veb(vsi))) {
10002 ctxt.info.valid_sections |=
10003 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10004 ctxt.info.switch_id =
10005 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10007 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
10010 case I40E_VSI_VMDQ2:
10011 ctxt.pf_num = hw->pf_id;
10013 ctxt.uplink_seid = vsi->uplink_seid;
10014 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
10015 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
10017 /* This VSI is connected to VEB so the switch_id
10018 * should be set to zero by default.
10020 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
10021 ctxt.info.valid_sections |=
10022 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10023 ctxt.info.switch_id =
10024 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10027 /* Setup the VSI tx/rx queue map for TC0 only for now */
10028 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
10031 case I40E_VSI_SRIOV:
10032 ctxt.pf_num = hw->pf_id;
10033 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
10034 ctxt.uplink_seid = vsi->uplink_seid;
10035 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
10036 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
10038 /* This VSI is connected to VEB so the switch_id
10039 * should be set to zero by default.
10041 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
10042 ctxt.info.valid_sections |=
10043 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10044 ctxt.info.switch_id =
10045 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10048 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
10049 ctxt.info.valid_sections |=
10050 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
10051 ctxt.info.queueing_opt_flags |=
10052 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
10053 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
10056 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
10057 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
10058 if (pf->vf[vsi->vf_id].spoofchk) {
10059 ctxt.info.valid_sections |=
10060 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
10061 ctxt.info.sec_flags |=
10062 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
10063 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
10065 /* Setup the VSI tx/rx queue map for TC0 only for now */
10066 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
10069 case I40E_VSI_IWARP:
10070 /* send down message to iWARP */
10077 if (vsi->type != I40E_VSI_MAIN) {
10078 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
10080 dev_info(&vsi->back->pdev->dev,
10081 "add vsi failed, err %s aq_err %s\n",
10082 i40e_stat_str(&pf->hw, ret),
10083 i40e_aq_str(&pf->hw,
10084 pf->hw.aq.asq_last_status));
10088 vsi->info = ctxt.info;
10089 vsi->info.valid_sections = 0;
10090 vsi->seid = ctxt.seid;
10091 vsi->id = ctxt.vsi_number;
10094 vsi->active_filters = 0;
10095 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
10096 spin_lock_bh(&vsi->mac_filter_hash_lock);
10097 /* If macvlan filters already exist, force them to get loaded */
10098 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
10099 f->state = I40E_FILTER_NEW;
10102 spin_unlock_bh(&vsi->mac_filter_hash_lock);
10105 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
10106 pf->flags |= I40E_FLAG_FILTER_SYNC;
10109 /* Update VSI BW information */
10110 ret = i40e_vsi_get_bw_info(vsi);
10112 dev_info(&pf->pdev->dev,
10113 "couldn't get vsi bw info, err %s aq_err %s\n",
10114 i40e_stat_str(&pf->hw, ret),
10115 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10116 /* VSI is already added so not tearing that up */
10125 * i40e_vsi_release - Delete a VSI and free its resources
10126 * @vsi: the VSI being removed
10128 * Returns 0 on success or < 0 on error
10130 int i40e_vsi_release(struct i40e_vsi *vsi)
10132 struct i40e_mac_filter *f;
10133 struct hlist_node *h;
10134 struct i40e_veb *veb = NULL;
10135 struct i40e_pf *pf;
10141 /* release of a VEB-owner or last VSI is not allowed */
10142 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
10143 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
10144 vsi->seid, vsi->uplink_seid);
10147 if (vsi == pf->vsi[pf->lan_vsi] &&
10148 !test_bit(__I40E_DOWN, pf->state)) {
10149 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
10153 uplink_seid = vsi->uplink_seid;
10154 if (vsi->type != I40E_VSI_SRIOV) {
10155 if (vsi->netdev_registered) {
10156 vsi->netdev_registered = false;
10158 /* results in a call to i40e_close() */
10159 unregister_netdev(vsi->netdev);
10162 i40e_vsi_close(vsi);
10164 i40e_vsi_disable_irq(vsi);
10167 spin_lock_bh(&vsi->mac_filter_hash_lock);
10169 /* clear the sync flag on all filters */
10171 __dev_uc_unsync(vsi->netdev, NULL);
10172 __dev_mc_unsync(vsi->netdev, NULL);
10175 /* make sure any remaining filters are marked for deletion */
10176 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
10177 __i40e_del_filter(vsi, f);
10179 spin_unlock_bh(&vsi->mac_filter_hash_lock);
10181 i40e_sync_vsi_filters(vsi);
10183 i40e_vsi_delete(vsi);
10184 i40e_vsi_free_q_vectors(vsi);
10186 free_netdev(vsi->netdev);
10187 vsi->netdev = NULL;
10189 i40e_vsi_clear_rings(vsi);
10190 i40e_vsi_clear(vsi);
10192 /* If this was the last thing on the VEB, except for the
10193 * controlling VSI, remove the VEB, which puts the controlling
10194 * VSI onto the next level down in the switch.
10196 * Well, okay, there's one more exception here: don't remove
10197 * the orphan VEBs yet. We'll wait for an explicit remove request
10198 * from up the network stack.
10200 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
10202 pf->vsi[i]->uplink_seid == uplink_seid &&
10203 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
10204 n++; /* count the VSIs */
10207 for (i = 0; i < I40E_MAX_VEB; i++) {
10210 if (pf->veb[i]->uplink_seid == uplink_seid)
10211 n++; /* count the VEBs */
10212 if (pf->veb[i]->seid == uplink_seid)
10215 if (n == 0 && veb && veb->uplink_seid != 0)
10216 i40e_veb_release(veb);
10222 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
10223 * @vsi: ptr to the VSI
10225 * This should only be called after i40e_vsi_mem_alloc() which allocates the
10226 * corresponding SW VSI structure and initializes num_queue_pairs for the
10227 * newly allocated VSI.
10229 * Returns 0 on success or negative on failure
10231 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
10234 struct i40e_pf *pf = vsi->back;
10236 if (vsi->q_vectors[0]) {
10237 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
10242 if (vsi->base_vector) {
10243 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
10244 vsi->seid, vsi->base_vector);
10248 ret = i40e_vsi_alloc_q_vectors(vsi);
10250 dev_info(&pf->pdev->dev,
10251 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
10252 vsi->num_q_vectors, vsi->seid, ret);
10253 vsi->num_q_vectors = 0;
10254 goto vector_setup_out;
10257 /* In Legacy mode, we do not have to get any other vector since we
10258 * piggyback on the misc/ICR0 for queue interrupts.
10260 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
10262 if (vsi->num_q_vectors)
10263 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
10264 vsi->num_q_vectors, vsi->idx);
10265 if (vsi->base_vector < 0) {
10266 dev_info(&pf->pdev->dev,
10267 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
10268 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
10269 i40e_vsi_free_q_vectors(vsi);
10271 goto vector_setup_out;
10279 * i40e_vsi_reinit_setup - release and reallocate resources for a VSI
10280 * @vsi: pointer to the vsi.
10282 * This re-allocates a vsi's queue resources.
10284 * Returns pointer to the successfully allocated and configured VSI sw struct
10285 * on success, otherwise returns NULL on failure.
10287 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
10289 u16 alloc_queue_pairs;
10290 struct i40e_pf *pf;
10299 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10300 i40e_vsi_clear_rings(vsi);
10302 i40e_vsi_free_arrays(vsi, false);
10303 i40e_set_num_rings_in_vsi(vsi);
10304 ret = i40e_vsi_alloc_arrays(vsi, false);
10308 alloc_queue_pairs = vsi->alloc_queue_pairs *
10309 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
10311 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
10313 dev_info(&pf->pdev->dev,
10314 "failed to get tracking for %d queues for VSI %d err %d\n",
10315 alloc_queue_pairs, vsi->seid, ret);
10318 vsi->base_queue = ret;
10320 /* Update the FW view of the VSI. Force a reset of TC and queue
10321 * layout configurations.
10323 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
10324 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
10325 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
10326 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
10327 if (vsi->type == I40E_VSI_MAIN)
10328 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
10330 /* assign it some queues */
10331 ret = i40e_alloc_rings(vsi);
10335 /* map all of the rings to the q_vectors */
10336 i40e_vsi_map_rings_to_vectors(vsi);
10340 i40e_vsi_free_q_vectors(vsi);
10341 if (vsi->netdev_registered) {
10342 vsi->netdev_registered = false;
10343 unregister_netdev(vsi->netdev);
10344 free_netdev(vsi->netdev);
10345 vsi->netdev = NULL;
10347 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
10349 i40e_vsi_clear(vsi);
10354 * i40e_vsi_setup - Set up a VSI by a given type
10355 * @pf: board private structure
10357 * @uplink_seid: the switch element to link to
10358 * @param1: usage depends upon VSI type. For VF types, indicates VF id
10360 * This allocates the sw VSI structure and its queue resources, then adds a VSI
10361 * to the identified VEB.
10363 * Returns a pointer to the successfully allocated and configured VSI sw struct on
10364 * success, otherwise returns NULL on failure.
10366 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
10367 u16 uplink_seid, u32 param1)
10369 struct i40e_vsi *vsi = NULL;
10370 struct i40e_veb *veb = NULL;
10371 u16 alloc_queue_pairs;
10375 /* The requested uplink_seid must be either
10376 * - the PF's port seid
10377 * no VEB is needed because this is the PF
10378 * or this is a Flow Director special case VSI
10379 * - seid of an existing VEB
10380 * - seid of a VSI that owns an existing VEB
10381 * - seid of a VSI that doesn't own a VEB
10382 * a new VEB is created and the VSI becomes the owner
10383 * - seid of the PF VSI, which is what creates the first VEB
10384 * this is a special case of the previous
10386 * Find which uplink_seid we were given and create a new VEB if needed
10388 for (i = 0; i < I40E_MAX_VEB; i++) {
10389 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
10395 if (!veb && uplink_seid != pf->mac_seid) {
10397 for (i = 0; i < pf->num_alloc_vsi; i++) {
10398 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
10404 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
10409 if (vsi->uplink_seid == pf->mac_seid)
10410 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
10411 vsi->tc_config.enabled_tc);
10412 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
10413 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
10414 vsi->tc_config.enabled_tc);
10416 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
10417 dev_info(&vsi->back->pdev->dev,
10418 "New VSI creation error, uplink seid of LAN VSI expected.\n");
10421 /* We come up by default in VEPA mode if SRIOV is not
10422 * already enabled, in which case we can't force VEPA
10425 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
10426 veb->bridge_mode = BRIDGE_MODE_VEPA;
10427 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
10429 i40e_config_bridge_mode(veb);
10431 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
10432 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
10436 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
10440 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
10441 uplink_seid = veb->seid;
10444 /* get vsi sw struct */
10445 v_idx = i40e_vsi_mem_alloc(pf, type);
10448 vsi = pf->vsi[v_idx];
10452 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
10454 if (type == I40E_VSI_MAIN)
10455 pf->lan_vsi = v_idx;
10456 else if (type == I40E_VSI_SRIOV)
10457 vsi->vf_id = param1;
10458 /* assign it some queues */
10459 alloc_queue_pairs = vsi->alloc_queue_pairs *
10460 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
10462 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
10464 dev_info(&pf->pdev->dev,
10465 "failed to get tracking for %d queues for VSI %d err=%d\n",
10466 alloc_queue_pairs, vsi->seid, ret);
10469 vsi->base_queue = ret;
10471 /* get a VSI from the hardware */
10472 vsi->uplink_seid = uplink_seid;
10473 ret = i40e_add_vsi(vsi);
10477 switch (vsi->type) {
10478 /* setup the netdev if needed */
10479 case I40E_VSI_MAIN:
10480 case I40E_VSI_VMDQ2:
10481 ret = i40e_config_netdev(vsi);
10484 ret = register_netdev(vsi->netdev);
10487 vsi->netdev_registered = true;
10488 netif_carrier_off(vsi->netdev);
10489 #ifdef CONFIG_I40E_DCB
10490 /* Setup DCB netlink interface */
10491 i40e_dcbnl_setup(vsi);
10492 #endif /* CONFIG_I40E_DCB */
10495 case I40E_VSI_FDIR:
10496 /* set up vectors and rings if needed */
10497 ret = i40e_vsi_setup_vectors(vsi);
10501 ret = i40e_alloc_rings(vsi);
10505 /* map all of the rings to the q_vectors */
10506 i40e_vsi_map_rings_to_vectors(vsi);
10508 i40e_vsi_reset_stats(vsi);
10512 /* no netdev or rings for the other VSI types */
10516 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
10517 (vsi->type == I40E_VSI_VMDQ2)) {
10518 ret = i40e_vsi_config_rss(vsi);
10523 i40e_vsi_free_q_vectors(vsi);
10525 if (vsi->netdev_registered) {
10526 vsi->netdev_registered = false;
10527 unregister_netdev(vsi->netdev);
10528 free_netdev(vsi->netdev);
10529 vsi->netdev = NULL;
10532 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
10534 i40e_vsi_clear(vsi);
10540 * i40e_veb_get_bw_info - Query VEB BW information
10541 * @veb: the veb to query
10543 * Query the Tx scheduler BW configuration data for given VEB
10545 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
10547 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
10548 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
10549 struct i40e_pf *pf = veb->pf;
10550 struct i40e_hw *hw = &pf->hw;
10555 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10558 dev_info(&pf->pdev->dev,
10559 "query veb bw config failed, err %s aq_err %s\n",
10560 i40e_stat_str(&pf->hw, ret),
10561 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
10565 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10568 dev_info(&pf->pdev->dev,
10569 "query veb bw ets config failed, err %s aq_err %s\n",
10570 i40e_stat_str(&pf->hw, ret),
10571 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
10575 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
10576 veb->bw_max_quanta = ets_data.tc_bw_max;
10577 veb->is_abs_credits = bw_data.absolute_credits_enable;
10578 veb->enabled_tc = ets_data.tc_valid_bits;
10579 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
10580 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
10581 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10582 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
10583 veb->bw_tc_limit_credits[i] =
10584 le16_to_cpu(bw_data.tc_bw_limits[i]);
10585 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
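/* tc_bw_max packs each TC's "max quanta" field into its own nibble of
 * two little-endian u16s; after merging them into one 32-bit value,
 * the shift by (i * 4) selects TC i's nibble and the 0x7 mask keeps
 * its low three bits.
 */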
10593 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
10594 * @pf: board private structure
10596 * On error: returns error code (negative)
10597 * On success: returns veb index in PF (positive)
10599 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
10602 struct i40e_veb *veb;
10605 /* Need to protect the allocation of switch elements at the PF level */
10606 mutex_lock(&pf->switch_mutex);
10608 /* VEB list may be fragmented if VEB creation/destruction has
10609 * been happening. We can afford to do a quick scan to look
10610 * for any free slots in the list.
10612 * find next empty veb slot, looping back around if necessary
10615 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
10617 if (i >= I40E_MAX_VEB) {
10619 goto err_alloc_veb; /* out of VEB slots! */
10622 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
10625 goto err_alloc_veb;
10629 veb->enabled_tc = 1;
10634 mutex_unlock(&pf->switch_mutex);
10639 * i40e_switch_branch_release - Delete a branch of the switch tree
10640 * @branch: where to start deleting
10642 * This uses recursion to find the tips of the branch to be
10643 * removed, deleting until we get back to and can delete this VEB.
10645 static void i40e_switch_branch_release(struct i40e_veb *branch)
10647 struct i40e_pf *pf = branch->pf;
10648 u16 branch_seid = branch->seid;
10649 u16 veb_idx = branch->idx;
10652 /* release any VEBs on this VEB - RECURSION */
10653 for (i = 0; i < I40E_MAX_VEB; i++) {
10656 if (pf->veb[i]->uplink_seid == branch->seid)
10657 i40e_switch_branch_release(pf->veb[i]);
10660 /* Release the VSIs on this VEB, but not the owner VSI.
10662 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
10663 * the VEB itself, so don't use (*branch) after this loop.
10665 for (i = 0; i < pf->num_alloc_vsi; i++) {
10668 if (pf->vsi[i]->uplink_seid == branch_seid &&
10669 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
10670 i40e_vsi_release(pf->vsi[i]);
10674 /* There's one corner case where the VEB might not have been
10675 * removed, so double check it here and remove it if needed.
10676 * This case happens if the veb was created from the debugfs
10677 * commands and no VSIs were added to it.
10679 if (pf->veb[veb_idx])
10680 i40e_veb_release(pf->veb[veb_idx]);
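/* A standalone sketch of the depth-first release used above: children
 * are found by scanning a flat table for entries whose parent id
 * matches the node being released, so the tips of the branch are freed
 * before their parent.  All names here are illustrative.
 */
#if 0	/* example only, not compiled into the driver */
#include <stdlib.h>

#define MAX_NODES 16
struct node { int id; int parent_id; };
static struct node *nodes[MAX_NODES];

static void branch_release(struct node *n)
{
	int id = n->id;	/* cache the id; *n is freed below */
	int i;

	/* recurse to the tips first */
	for (i = 0; i < MAX_NODES; i++)
		if (nodes[i] && nodes[i] != n && nodes[i]->parent_id == id)
			branch_release(nodes[i]);
	/* then drop this node from the table and free it */
	for (i = 0; i < MAX_NODES; i++)
		if (nodes[i] == n)
			nodes[i] = NULL;
	free(n);
}
#endif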
10684 * i40e_veb_clear - remove veb struct
10685 * @veb: the veb to remove
10687 static void i40e_veb_clear(struct i40e_veb *veb)
10693 struct i40e_pf *pf = veb->pf;
10695 mutex_lock(&pf->switch_mutex);
10696 if (pf->veb[veb->idx] == veb)
10697 pf->veb[veb->idx] = NULL;
10698 mutex_unlock(&pf->switch_mutex);
10705 * i40e_veb_release - Delete a VEB and free its resources
10706 * @veb: the VEB being removed
10708 void i40e_veb_release(struct i40e_veb *veb)
10710 struct i40e_vsi *vsi = NULL;
10711 struct i40e_pf *pf;
10716 /* find the remaining VSI and check for extras */
10717 for (i = 0; i < pf->num_alloc_vsi; i++) {
10718 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
10724 dev_info(&pf->pdev->dev,
10725 "can't remove VEB %d with %d VSIs left\n",
10730 /* move the remaining VSI to uplink veb */
10731 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
10732 if (veb->uplink_seid) {
10733 vsi->uplink_seid = veb->uplink_seid;
10734 if (veb->uplink_seid == pf->mac_seid)
10735 vsi->veb_idx = I40E_NO_VEB;
10737 vsi->veb_idx = veb->veb_idx;
10740 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10741 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
10744 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10745 i40e_veb_clear(veb);
10749 * i40e_add_veb - create the VEB in the switch
10750 * @veb: the VEB to be instantiated
10751 * @vsi: the controlling VSI
10753 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
10755 struct i40e_pf *pf = veb->pf;
10756 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
10759 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
10760 veb->enabled_tc, false,
10761 &veb->seid, enable_stats, NULL);
10763 /* get a VEB from the hardware */
10765 dev_info(&pf->pdev->dev,
10766 "couldn't add VEB, err %s aq_err %s\n",
10767 i40e_stat_str(&pf->hw, ret),
10768 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10772 /* get statistics counter */
10773 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
10774 &veb->stats_idx, NULL, NULL, NULL);
10776 dev_info(&pf->pdev->dev,
10777 "couldn't get VEB statistics idx, err %s aq_err %s\n",
10778 i40e_stat_str(&pf->hw, ret),
10779 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10782 ret = i40e_veb_get_bw_info(veb);
10784 dev_info(&pf->pdev->dev,
10785 "couldn't get VEB bw info, err %s aq_err %s\n",
10786 i40e_stat_str(&pf->hw, ret),
10787 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10788 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10792 vsi->uplink_seid = veb->seid;
10793 vsi->veb_idx = veb->idx;
10794 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
10800 * i40e_veb_setup - Set up a VEB
10801 * @pf: board private structure
10802 * @flags: VEB setup flags
10803 * @uplink_seid: the switch element to link to
10804 * @vsi_seid: the initial VSI seid
10805 * @enabled_tc: Enabled TC bit-map
10807 * This allocates the sw VEB structure and links it into the switch.
10808 * It is possible and legal for this to be a duplicate of an already
10809 * existing VEB. It is also possible for both uplink and vsi seids
10810 * to be zero, in order to create a floating VEB.
10812 * Returns pointer to the successfully allocated VEB sw struct on
10813 * success, otherwise returns NULL on failure.
10815 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
10816 u16 uplink_seid, u16 vsi_seid,
10819 struct i40e_veb *veb, *uplink_veb = NULL;
10820 int vsi_idx, veb_idx;
10823 /* if one seid is 0, the other must be 0 to create a floating relay */
10824 if ((uplink_seid == 0 || vsi_seid == 0) &&
10825 (uplink_seid + vsi_seid != 0)) {
10826 dev_info(&pf->pdev->dev,
10827 "one, not both seid's are 0: uplink=%d vsi=%d\n",
10828 uplink_seid, vsi_seid);
10832 /* make sure there is such a vsi and uplink */
10833 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
10834 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
10836 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
10837 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
10842 if (uplink_seid && uplink_seid != pf->mac_seid) {
10843 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10844 if (pf->veb[veb_idx] &&
10845 pf->veb[veb_idx]->seid == uplink_seid) {
10846 uplink_veb = pf->veb[veb_idx];
10851 dev_info(&pf->pdev->dev,
10852 "uplink seid %d not found\n", uplink_seid);
10857 /* get veb sw struct */
10858 veb_idx = i40e_veb_mem_alloc(pf);
10861 veb = pf->veb[veb_idx];
10862 veb->flags = flags;
10863 veb->uplink_seid = uplink_seid;
10864 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
10865 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
10867 /* create the VEB in the switch */
10868 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
10871 if (vsi_idx == pf->lan_vsi)
10872 pf->lan_veb = veb->idx;
10877 i40e_veb_clear(veb);
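/* The floating-relay check at the top of i40e_veb_setup() rejects the
 * case where exactly one seid is zero.  A standalone sketch of the same
 * predicate, written as an equality of "is zero" tests (names are
 * illustrative):
 */
#if 0	/* example only, not compiled into the driver */
#include <stdbool.h>
#include <stdint.h>

static bool seid_pair_valid(uint16_t uplink_seid, uint16_t vsi_seid)
{
	/* equivalent to rejecting (a == 0 || b == 0) && (a + b != 0):
	 * valid when both are zero (floating VEB) or both are nonzero
	 */
	return (uplink_seid == 0) == (vsi_seid == 0);
}
#endif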
10883 * i40e_setup_pf_switch_element - set PF vars based on switch type
10884 * @pf: board private structure
10885 * @ele: element we are building info from
10886 * @num_reported: total number of elements
10887 * @printconfig: should we print the contents
10889 * helper function to assist in extracting a few useful SEID values.
10891 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
10892 struct i40e_aqc_switch_config_element_resp *ele,
10893 u16 num_reported, bool printconfig)
10895 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
10896 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
10897 u8 element_type = ele->element_type;
10898 u16 seid = le16_to_cpu(ele->seid);
10901 dev_info(&pf->pdev->dev,
10902 "type=%d seid=%d uplink=%d downlink=%d\n",
10903 element_type, seid, uplink_seid, downlink_seid);
10905 switch (element_type) {
10906 case I40E_SWITCH_ELEMENT_TYPE_MAC:
10907 pf->mac_seid = seid;
10909 case I40E_SWITCH_ELEMENT_TYPE_VEB:
10911 if (uplink_seid != pf->mac_seid)
10913 if (pf->lan_veb == I40E_NO_VEB) {
10916 /* find existing or else empty VEB */
10917 for (v = 0; v < I40E_MAX_VEB; v++) {
10918 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
10923 if (pf->lan_veb == I40E_NO_VEB) {
10924 v = i40e_veb_mem_alloc(pf);
10931 pf->veb[pf->lan_veb]->seid = seid;
10932 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
10933 pf->veb[pf->lan_veb]->pf = pf;
10934 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
10936 case I40E_SWITCH_ELEMENT_TYPE_VSI:
10937 if (num_reported != 1)
10939 /* This is immediately after a reset so we can assume this is the PF's VSI */
10942 pf->mac_seid = uplink_seid;
10943 pf->pf_seid = downlink_seid;
10944 pf->main_vsi_seid = seid;
10946 dev_info(&pf->pdev->dev,
10947 "pf_seid=%d main_vsi_seid=%d\n",
10948 pf->pf_seid, pf->main_vsi_seid);
10950 case I40E_SWITCH_ELEMENT_TYPE_PF:
10951 case I40E_SWITCH_ELEMENT_TYPE_VF:
10952 case I40E_SWITCH_ELEMENT_TYPE_EMP:
10953 case I40E_SWITCH_ELEMENT_TYPE_BMC:
10954 case I40E_SWITCH_ELEMENT_TYPE_PE:
10955 case I40E_SWITCH_ELEMENT_TYPE_PA:
10956 /* ignore these for now */
10959 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
10960 element_type, seid);
10966 * i40e_fetch_switch_configuration - Get switch config from firmware
10967 * @pf: board private structure
10968 * @printconfig: should we print the contents
10970 * Get the current switch configuration from the device and
10971 * extract a few useful SEID values.
10973 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
10975 struct i40e_aqc_get_switch_config_resp *sw_config;
10981 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
10985 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
10987 u16 num_reported, num_total;
10989 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
10993 dev_info(&pf->pdev->dev,
10994 "get switch config failed err %s aq_err %s\n",
10995 i40e_stat_str(&pf->hw, ret),
10996 i40e_aq_str(&pf->hw,
10997 pf->hw.aq.asq_last_status));
11002 num_reported = le16_to_cpu(sw_config->header.num_reported);
11003 num_total = le16_to_cpu(sw_config->header.num_total);
11006 dev_info(&pf->pdev->dev,
11007 "header: %d reported %d total\n",
11008 num_reported, num_total);
11010 for (i = 0; i < num_reported; i++) {
11011 struct i40e_aqc_switch_config_element_resp *ele =
11012 &sw_config->element[i];
11014 i40e_setup_pf_switch_element(pf, ele, num_reported,
11017 } while (next_seid != 0);
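/* A minimal sketch of the continuation-token loop above, assuming a
 * fetch routine that fills a caller-supplied buffer and hands back the
 * seid to continue from (zero when the walk is complete).  fetch_chunk()
 * and struct chunk are hypothetical stand-ins, not firmware API.
 */
#if 0	/* example only, not compiled into the driver */
#include <stdint.h>

struct chunk { uint16_t num_reported; /* ...elements follow... */ };

extern int fetch_chunk(struct chunk *buf, uint16_t start_seid,
		       uint16_t *next_seid);

static int fetch_all(struct chunk *buf)
{
	uint16_t next_seid = 0;
	int err;

	do {
		/* each pass reuses the buffer, so consume it here
		 * before looping around for the next chunk
		 */
		err = fetch_chunk(buf, next_seid, &next_seid);
		if (err)
			return err;
	} while (next_seid != 0);
	return 0;
}
#endif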
11024 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
11025 * @pf: board private structure
11026 * @reinit: if the Main VSI needs to be re-initialized.
11028 * Returns 0 on success, negative value on failure
11030 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
11035 /* find out what's out there already */
11036 ret = i40e_fetch_switch_configuration(pf, false);
11038 dev_info(&pf->pdev->dev,
11039 "couldn't fetch switch config, err %s aq_err %s\n",
11040 i40e_stat_str(&pf->hw, ret),
11041 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11044 i40e_pf_reset_stats(pf);
11046 /* set the switch config bit for the whole device to
11047 * support limited promisc or true promisc
11048 * when user requests promisc. The default is limited promisc. */
11052 if ((pf->hw.pf_id == 0) &&
11053 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
11054 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
11056 if (pf->hw.pf_id == 0) {
11059 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
11060 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
11062 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
11063 dev_info(&pf->pdev->dev,
11064 "couldn't set switch config bits, err %s aq_err %s\n",
11065 i40e_stat_str(&pf->hw, ret),
11066 i40e_aq_str(&pf->hw,
11067 pf->hw.aq.asq_last_status));
11068 /* not a fatal problem, just keep going */
11072 /* first time setup */
11073 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
11074 struct i40e_vsi *vsi = NULL;
11077 /* Set up the PF VSI associated with the PF's main VSI
11078 * that is already in the HW switch
11080 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
11081 uplink_seid = pf->veb[pf->lan_veb]->seid;
11083 uplink_seid = pf->mac_seid;
11084 if (pf->lan_vsi == I40E_NO_VSI)
11085 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
11087 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
11089 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
11090 i40e_fdir_teardown(pf);
11094 /* force a reset of TC and queue layout configurations */
11095 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
11097 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
11098 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
11099 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
11101 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
11103 i40e_fdir_sb_setup(pf);
11105 /* Setup static PF queue filter control settings */
11106 ret = i40e_setup_pf_filter_control(pf);
11108 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
11110 /* Failure here should not stop the remaining setup steps */
11113 /* enable RSS in the HW, even for only one queue, as the stack can use the hash */
11116 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
11117 i40e_pf_config_rss(pf);
11119 /* fill in link information and enable LSE reporting */
11120 i40e_link_event(pf);
11122 /* Initialize user-specific link properties */
11123 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
11124 I40E_AQ_AN_COMPLETED) ? true : false);
11128 /* repopulate tunnel port filters */
11129 i40e_sync_udp_filters(pf);
11135 * i40e_determine_queue_usage - Work out queue distribution
11136 * @pf: board private structure
11138 static void i40e_determine_queue_usage(struct i40e_pf *pf)
11143 pf->num_lan_qps = 0;
11145 /* Find the max queues to be put into basic use. We'll always be
11146 * using TC0, whether or not DCB is running, and TC0 will get the big share of the default queue. */
11149 queues_left = pf->hw.func_caps.num_tx_qp;
11151 if ((queues_left == 1) ||
11152 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
11153 /* one qp for PF, no queues for anything else */
11155 pf->alloc_rss_size = pf->num_lan_qps = 1;
11157 /* make sure all the fancies are disabled */
11158 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
11159 I40E_FLAG_IWARP_ENABLED |
11160 I40E_FLAG_FD_SB_ENABLED |
11161 I40E_FLAG_FD_ATR_ENABLED |
11162 I40E_FLAG_DCB_CAPABLE |
11163 I40E_FLAG_DCB_ENABLED |
11164 I40E_FLAG_SRIOV_ENABLED |
11165 I40E_FLAG_VMDQ_ENABLED);
11166 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
11167 I40E_FLAG_FD_SB_ENABLED |
11168 I40E_FLAG_FD_ATR_ENABLED |
11169 I40E_FLAG_DCB_CAPABLE))) {
11170 /* one qp for PF */
11171 pf->alloc_rss_size = pf->num_lan_qps = 1;
11172 queues_left -= pf->num_lan_qps;
11174 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
11175 I40E_FLAG_IWARP_ENABLED |
11176 I40E_FLAG_FD_SB_ENABLED |
11177 I40E_FLAG_FD_ATR_ENABLED |
11178 I40E_FLAG_DCB_ENABLED |
11179 I40E_FLAG_VMDQ_ENABLED);
11181 /* Not enough queues for all TCs */
11182 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
11183 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
11184 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
11185 I40E_FLAG_DCB_ENABLED);
11186 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
11189 /* limit lan qps to the smaller of qps, cpus or msix */
11190 q_max = max_t(int, pf->rss_size_max, num_online_cpus());
11191 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
11192 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
11193 pf->num_lan_qps = q_max;
11195 queues_left -= pf->num_lan_qps;
11198 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11199 if (queues_left > 1) {
11200 queues_left -= 1; /* save 1 queue for FD */
11202 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11203 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
11207 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11208 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
11209 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
11210 (queues_left / pf->num_vf_qps));
11211 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
11214 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
11215 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
11216 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
11217 (queues_left / pf->num_vmdq_qps));
11218 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
11221 pf->queues_left = queues_left;
11222 dev_dbg(&pf->pdev->dev,
11223 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
11224 pf->hw.func_caps.num_tx_qp,
11225 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
11226 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
11227 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
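/* A worked standalone example of the budgeting above, using
 * hypothetical capabilities: 128 queue pairs, 16 CPUs online,
 * rss_size_max of 64, 129 MSI-X vectors, flow director sideband on,
 * 4 requested VFs at 4 qps each, and 8 VMDq VSIs at 2 qps each.
 */
#if 0	/* example only, not compiled into the driver */
#include <stdio.h>

int main(void)
{
	int queues_left = 128;			/* num_tx_qp */
	int q_max, num_lan_qps;
	int num_vfs = 4, vf_qps = 4;
	int vmdq_vsis = 8, vmdq_qps = 2;

	q_max = 64 > 16 ? 64 : 16;		/* max(rss_size_max, cpus) = 64 */
	q_max = q_max < 128 ? q_max : 128;	/* min(, num_tx_qp)  = 64 */
	q_max = q_max < 129 ? q_max : 129;	/* min(, msix)       = 64 */
	num_lan_qps = q_max;
	queues_left -= num_lan_qps;		/* 64 left */
	queues_left -= 1;			/* 63 left after the FD queue */
	queues_left -= num_vfs * vf_qps;	/* 47 left after VFs */
	queues_left -= vmdq_vsis * vmdq_qps;	/* 31 left after VMDq */
	printf("lan=%d remaining=%d\n", num_lan_qps, queues_left);
	return 0;
}
#endif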
11232 * i40e_setup_pf_filter_control - Setup PF static filter control
11233 * @pf: PF to be setup
11235 * i40e_setup_pf_filter_control sets up a PF's initial filter control
11236 * settings. If PE/FCoE are enabled then it will also set the per PF
11237 * based filter sizes required for them. It also enables Flow Director,
11238 * ethertype and macvlan type filter settings for the PF.
11240 * Returns 0 on success, negative on failure
11242 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
11244 struct i40e_filter_control_settings *settings = &pf->filter_settings;
11246 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
11248 /* Flow Director is enabled */
11249 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
11250 settings->enable_fdir = true;
11252 /* Ethtype and MACVLAN filters enabled for PF */
11253 settings->enable_ethtype = true;
11254 settings->enable_macvlan = true;
11256 if (i40e_set_filter_control(&pf->hw, settings))
11262 #define INFO_STRING_LEN 255
11263 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
11264 static void i40e_print_features(struct i40e_pf *pf)
11266 struct i40e_hw *hw = &pf->hw;
11270 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
11274 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
11275 #ifdef CONFIG_PCI_IOV
11276 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
11278 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
11279 pf->hw.func_caps.num_vsis,
11280 pf->vsi[pf->lan_vsi]->num_queue_pairs);
11281 if (pf->flags & I40E_FLAG_RSS_ENABLED)
11282 i += snprintf(&buf[i], REMAIN(i), " RSS");
11283 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
11284 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
11285 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11286 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
11287 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
11289 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
11290 i += snprintf(&buf[i], REMAIN(i), " DCB");
11291 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
11292 i += snprintf(&buf[i], REMAIN(i), " Geneve");
11293 if (pf->flags & I40E_FLAG_PTP)
11294 i += snprintf(&buf[i], REMAIN(i), " PTP");
11295 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
11296 i += snprintf(&buf[i], REMAIN(i), " VEB");
11298 i += snprintf(&buf[i], REMAIN(i), " VEPA");
11300 dev_info(&pf->pdev->dev, "%s\n", buf);
11302 WARN_ON(i > INFO_STRING_LEN);
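/* A standalone sketch of the append pattern above.  snprintf() returns
 * the length that *would* have been written, so on truncation the
 * running index can exceed the buffer size; that is exactly the
 * condition the WARN_ON above detects after the fact.  Names below are
 * illustrative.
 */
#if 0	/* example only, not compiled into the driver */
#include <stdio.h>

#define LEN 255
#define REMAIN(x) (LEN - (x))

int main(void)
{
	char buf[LEN];
	int i;

	i = snprintf(buf, LEN, "Features:");
	i += snprintf(&buf[i], REMAIN(i), " RSS");
	i += snprintf(&buf[i], REMAIN(i), " DCB");
	if (i > LEN)			/* would-be length exceeded buffer */
		fprintf(stderr, "feature string truncated\n");
	else
		printf("%s\n", buf);
	return 0;
}
#endif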
11306 * i40e_get_platform_mac_addr - get platform-specific MAC address
11307 * @pdev: PCI device information struct
11308 * @pf: board private structure
11310 * Look up the MAC address for the device. First we'll try
11311 * eth_platform_get_mac_address, which will check Open Firmware or an
11312 * arch-specific fallback. Otherwise, we'll default to the value stored in firmware. **/
11315 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
11317 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
11318 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
11322 * i40e_probe - Device initialization routine
11323 * @pdev: PCI device information struct
11324 * @ent: entry in i40e_pci_tbl
11326 * i40e_probe initializes a PF identified by a pci_dev structure.
11327 * The OS initialization, configuring of the PF private structure,
11328 * and a hardware reset occur.
11330 * Returns 0 on success, negative on failure
11332 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11334 struct i40e_aq_get_phy_abilities_resp abilities;
11335 struct i40e_pf *pf;
11336 struct i40e_hw *hw;
11337 static u16 pfs_found;
11345 err = pci_enable_device_mem(pdev);
11349 /* set up for high or low dma */
11350 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11352 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11354 dev_err(&pdev->dev,
11355 "DMA configuration failed: 0x%x\n", err);
11360 /* set up pci connections */
11361 err = pci_request_mem_regions(pdev, i40e_driver_name);
11363 dev_info(&pdev->dev,
11364 "pci_request_selected_regions failed %d\n", err);
11368 pci_enable_pcie_error_reporting(pdev);
11369 pci_set_master(pdev);
11371 /* Now that we have a PCI connection, we need to do the
11372 * low level device setup. This is primarily setting up
11373 * the Admin Queue structures and then querying for the
11374 * device's current profile information.
11376 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
11383 set_bit(__I40E_DOWN, pf->state);
11388 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
11389 I40E_MAX_CSR_SPACE);
11391 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
11392 if (!hw->hw_addr) {
11394 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
11395 (unsigned int)pci_resource_start(pdev, 0),
11396 pf->ioremap_len, err);
11399 hw->vendor_id = pdev->vendor;
11400 hw->device_id = pdev->device;
11401 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
11402 hw->subsystem_vendor_id = pdev->subsystem_vendor;
11403 hw->subsystem_device_id = pdev->subsystem_device;
11404 hw->bus.device = PCI_SLOT(pdev->devfn);
11405 hw->bus.func = PCI_FUNC(pdev->devfn);
11406 hw->bus.bus_id = pdev->bus->number;
11407 pf->instance = pfs_found;
11409 /* Select something other than the 802.1ad ethertype for the
11410 * switch to use internally and drop on ingress.
11412 hw->switch_tag = 0xffff;
11413 hw->first_tag = ETH_P_8021AD;
11414 hw->second_tag = ETH_P_8021Q;
11416 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
11417 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
11419 /* set up the locks for the AQ, do this only once in probe
11420 * and destroy them only once in remove
11422 mutex_init(&hw->aq.asq_mutex);
11423 mutex_init(&hw->aq.arq_mutex);
11425 pf->msg_enable = netif_msg_init(debug,
11430 pf->hw.debug_mask = debug;
11432 /* do a special CORER for clearing PXE mode once at init */
11433 if (hw->revision_id == 0 &&
11434 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
11435 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
11440 i40e_clear_pxe_mode(hw);
11443 /* Reset here to make sure all is clean and to define PF 'n' */
11445 err = i40e_pf_reset(hw);
11447 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
11452 hw->aq.num_arq_entries = I40E_AQ_LEN;
11453 hw->aq.num_asq_entries = I40E_AQ_LEN;
11454 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
11455 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
11456 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
11458 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
11460 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
11462 err = i40e_init_shared_code(hw);
11464 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
11469 /* set up a default setting for link flow control */
11470 pf->hw.fc.requested_mode = I40E_FC_NONE;
11472 err = i40e_init_adminq(hw);
11474 if (err == I40E_ERR_FIRMWARE_API_VERSION)
11475 dev_info(&pdev->dev,
11476 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
11478 dev_info(&pdev->dev,
11479 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
11483 i40e_get_oem_version(hw);
11485 /* provide nvm, fw, api versions */
11486 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
11487 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
11488 hw->aq.api_maj_ver, hw->aq.api_min_ver,
11489 i40e_nvm_version_str(hw));
11491 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
11492 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
11493 dev_info(&pdev->dev,
11494 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
11495 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
11496 dev_info(&pdev->dev,
11497 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
11499 i40e_verify_eeprom(pf);
11501 /* Rev 0 hardware was never productized */
11502 if (hw->revision_id < 1)
11503 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
11505 i40e_clear_pxe_mode(hw);
11506 err = i40e_get_capabilities(pf);
11508 goto err_adminq_setup;
11510 err = i40e_sw_init(pf);
11512 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
11516 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
11517 hw->func_caps.num_rx_qp, 0, 0);
11519 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
11520 goto err_init_lan_hmc;
11523 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
11525 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
11527 goto err_configure_lan_hmc;
11530 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
11531 * Ignore error return codes because if it was already disabled via
11532 * hardware settings this will fail
11534 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
11535 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
11536 i40e_aq_stop_lldp(hw, true, NULL);
11539 /* allow a platform config to override the HW addr */
11540 i40e_get_platform_mac_addr(pdev, pf);
11542 if (!is_valid_ether_addr(hw->mac.addr)) {
11543 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
11547 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
11548 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
11549 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
11550 if (is_valid_ether_addr(hw->mac.port_addr))
11551 pf->hw_features |= I40E_HW_PORT_ID_VALID;
11553 pci_set_drvdata(pdev, pf);
11554 pci_save_state(pdev);
11555 #ifdef CONFIG_I40E_DCB
11556 err = i40e_init_pf_dcb(pf);
11558 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
11559 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
11560 /* Continue without DCB enabled */
11562 #endif /* CONFIG_I40E_DCB */
11564 /* set up periodic task facility */
11565 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
11566 pf->service_timer_period = HZ;
11568 INIT_WORK(&pf->service_task, i40e_service_task);
11569 clear_bit(__I40E_SERVICE_SCHED, pf->state);
11571 /* NVM bit on means WoL disabled for the port */
11572 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
11573 if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
11574 pf->wol_en = false;
11577 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
11579 /* set up the main switch operations */
11580 i40e_determine_queue_usage(pf);
11581 err = i40e_init_interrupt_scheme(pf);
11583 goto err_switch_setup;
11585 /* The number of VSIs reported by the FW is the minimum guaranteed
11586 * to us; HW supports far more and we share the remaining pool with
11587 * the other PFs. We allocate space for more than the guarantee with
11588 * the understanding that we might not get them all later.
11590 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
11591 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
11593 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
11595 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
11596 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
11600 goto err_switch_setup;
11603 #ifdef CONFIG_PCI_IOV
11604 /* prep for VF support */
11605 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11606 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11607 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
11608 if (pci_num_vf(pdev))
11609 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
11612 err = i40e_setup_pf_switch(pf, false);
11614 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
11618 /* Make sure flow control is set according to current settings */
11619 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
11620 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
11621 dev_dbg(&pf->pdev->dev,
11622 "Set fc with err %s aq_err %s on get_phy_cap\n",
11623 i40e_stat_str(hw, err),
11624 i40e_aq_str(hw, hw->aq.asq_last_status));
11625 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
11626 dev_dbg(&pf->pdev->dev,
11627 "Set fc with err %s aq_err %s on set_phy_config\n",
11628 i40e_stat_str(hw, err),
11629 i40e_aq_str(hw, hw->aq.asq_last_status));
11630 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
11631 dev_dbg(&pf->pdev->dev,
11632 "Set fc with err %s aq_err %s on get_link_info\n",
11633 i40e_stat_str(hw, err),
11634 i40e_aq_str(hw, hw->aq.asq_last_status));
11636 /* if FDIR VSI was set up, start it now */
11637 for (i = 0; i < pf->num_alloc_vsi; i++) {
11638 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
11639 i40e_vsi_open(pf->vsi[i]);
11644 /* The driver only wants link up/down and module qualification
11645 * reports from firmware. Note the negative logic.
11647 err = i40e_aq_set_phy_int_mask(&pf->hw,
11648 ~(I40E_AQ_EVENT_LINK_UPDOWN |
11649 I40E_AQ_EVENT_MEDIA_NA |
11650 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
11652 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
11653 i40e_stat_str(&pf->hw, err),
11654 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11656 /* Reconfigure hardware for allowing smaller MSS in the case
11657 * of TSO, so that we avoid the MDD being fired and causing
11658 * a reset in the case of small MSS+TSO.
11660 val = rd32(hw, I40E_REG_MSS);
11661 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
11662 val &= ~I40E_REG_MSS_MIN_MASK;
11663 val |= I40E_64BYTE_MSS;
11664 wr32(hw, I40E_REG_MSS, val);
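/* The MSS clamp above is a read-modify-write of a register bit-field.
 * A standalone sketch of the idiom on a register image, with a
 * hypothetical 8-bit field in bits 7:0:
 */
#if 0	/* example only, not compiled into the driver */
#include <stdint.h>

#define FIELD_MASK 0xffu	/* hypothetical field, bits 7:0 */

static uint32_t field_update(uint32_t reg, uint32_t new_field)
{
	reg &= ~FIELD_MASK;		/* clear the old field value */
	reg |= new_field & FIELD_MASK;	/* splice in the new value */
	return reg;
}
#endif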
11667 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
11669 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
11671 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
11672 i40e_stat_str(&pf->hw, err),
11673 i40e_aq_str(&pf->hw,
11674 pf->hw.aq.asq_last_status));
11676 /* The main driver is (mostly) up and happy. We need to set this state
11677 * before setting up the misc vector or we get a race and the vector
11678 * ends up disabled forever.
11680 clear_bit(__I40E_DOWN, pf->state);
11682 /* In case of MSIX we are going to setup the misc vector right here
11683 * to handle admin queue events etc. In case of legacy and MSI
11684 * the misc functionality and queue processing is combined in
11685 * the same vector and that gets setup at open.
11687 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11688 err = i40e_setup_misc_vector(pf);
11690 dev_info(&pdev->dev,
11691 "setup of misc vector failed: %d\n", err);
11696 #ifdef CONFIG_PCI_IOV
11697 /* prep for VF support */
11698 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11699 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11700 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
11701 /* disable link interrupts for VFs */
11702 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
11703 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
11704 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
11707 if (pci_num_vf(pdev)) {
11708 dev_info(&pdev->dev,
11709 "Active VFs found, allocating resources.\n");
11710 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
11712 dev_info(&pdev->dev,
11713 "Error %d allocating resources for existing VFs\n",
11717 #endif /* CONFIG_PCI_IOV */
11719 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11720 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
11721 pf->num_iwarp_msix,
11722 I40E_IWARP_IRQ_PILE_ID);
11723 if (pf->iwarp_base_vector < 0) {
11724 dev_info(&pdev->dev,
11725 "failed to get tracking for %d vectors for IWARP err=%d\n",
11726 pf->num_iwarp_msix, pf->iwarp_base_vector);
11727 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11731 i40e_dbg_pf_init(pf);
11733 /* tell the firmware that we're starting */
11734 i40e_send_version(pf);
11736 /* since everything's happy, start the service_task timer */
11737 mod_timer(&pf->service_timer,
11738 round_jiffies(jiffies + pf->service_timer_period));
11740 /* add this PF to client device list and launch a client service task */
11741 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11742 err = i40e_lan_add_device(pf);
11744 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
11748 #define PCI_SPEED_SIZE 8
11749 #define PCI_WIDTH_SIZE 8
11750 /* Devices on the IOSF bus do not have this information
11751 * and will report PCI Gen 1 x 1 by default so don't bother checking against them. */
11754 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
11755 char speed[PCI_SPEED_SIZE] = "Unknown";
11756 char width[PCI_WIDTH_SIZE] = "Unknown";
11758 /* Get the negotiated link width and speed from PCI config space */
11761 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
11764 i40e_set_pci_config_data(hw, link_status);
11766 switch (hw->bus.speed) {
11767 case i40e_bus_speed_8000:
11768 strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
11769 case i40e_bus_speed_5000:
11770 strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
11771 case i40e_bus_speed_2500:
11772 strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
11776 switch (hw->bus.width) {
11777 case i40e_bus_width_pcie_x8:
11778 strncpy(width, "8", PCI_WIDTH_SIZE); break;
11779 case i40e_bus_width_pcie_x4:
11780 strncpy(width, "4", PCI_WIDTH_SIZE); break;
11781 case i40e_bus_width_pcie_x2:
11782 strncpy(width, "2", PCI_WIDTH_SIZE); break;
11783 case i40e_bus_width_pcie_x1:
11784 strncpy(width, "1", PCI_WIDTH_SIZE); break;
11789 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
11792 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
11793 hw->bus.speed < i40e_bus_speed_8000) {
11794 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
11795 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
11799 /* get the requested speeds from the fw */
11800 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
11802 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
11803 i40e_stat_str(&pf->hw, err),
11804 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11805 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
11807 /* get the supported phy types from the fw */
11808 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
11810 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
11811 i40e_stat_str(&pf->hw, err),
11812 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11814 /* Add a filter to drop all Flow control frames from any VSI from being
11815 * transmitted. By doing so we stop a malicious VF from sending out
11816 * PAUSE or PFC frames and potentially controlling traffic for other PF/VF VSIs.
11818 * The FW can still send Flow control frames if enabled.
11820 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
11821 pf->main_vsi_seid);
11823 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
11824 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
11825 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
11826 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
11827 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
11828 /* print a string summarizing features */
11829 i40e_print_features(pf);
11833 /* Unwind what we've done if something failed in the setup */
11835 set_bit(__I40E_DOWN, pf->state);
11836 i40e_clear_interrupt_scheme(pf);
11839 i40e_reset_interrupt_capability(pf);
11840 del_timer_sync(&pf->service_timer);
11842 err_configure_lan_hmc:
11843 (void)i40e_shutdown_lan_hmc(hw);
11845 kfree(pf->qp_pile);
11849 iounmap(hw->hw_addr);
11853 pci_disable_pcie_error_reporting(pdev);
11854 pci_release_mem_regions(pdev);
11857 pci_disable_device(pdev);
11862 * i40e_remove - Device removal routine
11863 * @pdev: PCI device information struct
11865 * i40e_remove is called by the PCI subsystem to alert the driver
11866 * that it should release a PCI device. This could be caused by a
11867 * Hot-Plug event, or because the driver is going to be removed from memory.
11870 static void i40e_remove(struct pci_dev *pdev)
11872 struct i40e_pf *pf = pci_get_drvdata(pdev);
11873 struct i40e_hw *hw = &pf->hw;
11874 i40e_status ret_code;
11877 i40e_dbg_pf_exit(pf);
11881 /* Disable RSS in hw */
11882 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
11883 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
11885 /* no more scheduling of any task */
11886 set_bit(__I40E_SUSPENDED, pf->state);
11887 set_bit(__I40E_DOWN, pf->state);
11888 if (pf->service_timer.data)
11889 del_timer_sync(&pf->service_timer);
11890 if (pf->service_task.func)
11891 cancel_work_sync(&pf->service_task);
11893 /* Client close must be called explicitly here because the timer
11894 * has been stopped.
11896 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
11898 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
11900 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
11903 i40e_fdir_teardown(pf);
11905 /* If there is a switch structure or any orphans, remove them.
11906 * This will leave only the PF's VSI remaining.
11908 for (i = 0; i < I40E_MAX_VEB; i++) {
11912 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
11913 pf->veb[i]->uplink_seid == 0)
11914 i40e_switch_branch_release(pf->veb[i]);
11917 /* Now we can shut down the PF's VSI, just before we kill adminq and hmc. */
11920 if (pf->vsi[pf->lan_vsi])
11921 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
11923 /* remove attached clients */
11924 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11925 ret_code = i40e_lan_del_device(pf);
11927 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
11931 /* shutdown and destroy the HMC */
11932 if (hw->hmc.hmc_obj) {
11933 ret_code = i40e_shutdown_lan_hmc(hw);
11935 dev_warn(&pdev->dev,
11936 "Failed to destroy the HMC resources: %d\n",
11940 /* shutdown the adminq */
11941 i40e_shutdown_adminq(hw);
11943 /* destroy the locks only once, here */
11944 mutex_destroy(&hw->aq.arq_mutex);
11945 mutex_destroy(&hw->aq.asq_mutex);
11947 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
11948 i40e_clear_interrupt_scheme(pf);
11949 for (i = 0; i < pf->num_alloc_vsi; i++) {
11951 i40e_vsi_clear_rings(pf->vsi[i]);
11952 i40e_vsi_clear(pf->vsi[i]);
11957 for (i = 0; i < I40E_MAX_VEB; i++) {
11962 kfree(pf->qp_pile);
11965 iounmap(hw->hw_addr);
11967 pci_release_mem_regions(pdev);
11969 pci_disable_pcie_error_reporting(pdev);
11970 pci_disable_device(pdev);
11974 * i40e_pci_error_detected - warning that something funky happened in PCI land
11975 * @pdev: PCI device information struct
11977 * Called to warn that something happened and the error handling steps
11978 * are in progress. Allows the driver to quiesce things, be ready for remediation. **/
11981 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
11982 enum pci_channel_state error)
11984 struct i40e_pf *pf = pci_get_drvdata(pdev);
11986 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
11989 dev_info(&pdev->dev,
11990 "Cannot recover - error happened during device probe\n");
11991 return PCI_ERS_RESULT_DISCONNECT;
11994 /* shutdown all operations */
11995 if (!test_bit(__I40E_SUSPENDED, pf->state))
11996 i40e_prep_for_reset(pf, false);
11998 /* Request a slot reset */
11999 return PCI_ERS_RESULT_NEED_RESET;
12003 * i40e_pci_error_slot_reset - a PCI slot reset just happened
12004 * @pdev: PCI device information struct
12006 * Called to find if the driver can work with the device now that
12007 * the pci slot has been reset. If a basic connection seems good
12008 * (registers are readable and have sane content) then return a
12009 * happy little PCI_ERS_RESULT_xxx.
12011 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
12013 struct i40e_pf *pf = pci_get_drvdata(pdev);
12014 pci_ers_result_t result;
12018 dev_dbg(&pdev->dev, "%s\n", __func__);
12019 if (pci_enable_device_mem(pdev)) {
12020 dev_info(&pdev->dev,
12021 "Cannot re-enable PCI device after reset.\n");
12022 result = PCI_ERS_RESULT_DISCONNECT;
12024 pci_set_master(pdev);
12025 pci_restore_state(pdev);
12026 pci_save_state(pdev);
12027 pci_wake_from_d3(pdev, false);
12029 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
12031 result = PCI_ERS_RESULT_RECOVERED;
12033 result = PCI_ERS_RESULT_DISCONNECT;
12036 err = pci_cleanup_aer_uncorrect_error_status(pdev);
12038 dev_info(&pdev->dev,
12039 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
12041 /* non-fatal, continue */
12048 * i40e_pci_error_resume - restart operations after PCI error recovery
12049 * @pdev: PCI device information struct
12051 * Called to allow the driver to bring things back up after PCI error
12052 * and/or reset recovery has finished.
12054 static void i40e_pci_error_resume(struct pci_dev *pdev)
12056 struct i40e_pf *pf = pci_get_drvdata(pdev);
12058 dev_dbg(&pdev->dev, "%s\n", __func__);
12059 if (test_bit(__I40E_SUSPENDED, pf->state))
12062 i40e_handle_reset_warning(pf, false);
12066 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
12067 * using the mac_address_write admin q function
12068 * @pf: pointer to i40e_pf struct
12070 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
12072 struct i40e_hw *hw = &pf->hw;
12077 /* Get current MAC address in case it's an LAA */
12078 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
12079 ether_addr_copy(mac_addr,
12080 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
12082 dev_err(&pf->pdev->dev,
12083 "Failed to retrieve MAC address; using default\n");
12084 ether_addr_copy(mac_addr, hw->mac.addr);
12087 /* The FW expects the mac address write cmd to first be called with
12088 * one of these flags before calling it again with the multicast enable flags. */
12091 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
12093 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
12094 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
12096 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
12098 dev_err(&pf->pdev->dev,
12099 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
12103 flags = I40E_AQC_MC_MAG_EN
12104 | I40E_AQC_WOL_PRESERVE_ON_PFR
12105 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
12106 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
12108 dev_err(&pf->pdev->dev,
12109 "Failed to enable Multicast Magic Packet wake up\n");
12113 * i40e_shutdown - PCI callback for shutting down
12114 * @pdev: PCI device information struct
12116 static void i40e_shutdown(struct pci_dev *pdev)
12118 struct i40e_pf *pf = pci_get_drvdata(pdev);
12119 struct i40e_hw *hw = &pf->hw;
12121 set_bit(__I40E_SUSPENDED, pf->state);
12122 set_bit(__I40E_DOWN, pf->state);
12124 i40e_prep_for_reset(pf, true);
12127 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
12128 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
12130 del_timer_sync(&pf->service_timer);
12131 cancel_work_sync(&pf->service_task);
12132 i40e_fdir_teardown(pf);
12134 /* Client close must be called explicitly here because the timer
12135 * has been stopped.
12137 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
12139 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
12140 i40e_enable_mc_magic_wake(pf);
12142 i40e_prep_for_reset(pf, false);
12144 wr32(hw, I40E_PFPM_APM,
12145 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
12146 wr32(hw, I40E_PFPM_WUFC,
12147 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
12149 i40e_clear_interrupt_scheme(pf);
12151 if (system_state == SYSTEM_POWER_OFF) {
12152 pci_wake_from_d3(pdev, pf->wol_en);
12153 pci_set_power_state(pdev, PCI_D3hot);
12159 * i40e_suspend - PM callback for moving to D3
12160 * @dev: generic device information structure
12162 static int i40e_suspend(struct device *dev)
12164 struct pci_dev *pdev = to_pci_dev(dev);
12165 struct i40e_pf *pf = pci_get_drvdata(pdev);
12166 struct i40e_hw *hw = &pf->hw;
12168 /* If we're already suspended, then there is nothing to do */
12169 if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
12172 set_bit(__I40E_DOWN, pf->state);
12174 /* Ensure service task will not be running */
12175 del_timer_sync(&pf->service_timer);
12176 cancel_work_sync(&pf->service_task);
12178 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
12179 i40e_enable_mc_magic_wake(pf);
12181 i40e_prep_for_reset(pf, false);
12183 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
12184 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
12186 /* Clear the interrupt scheme and release our IRQs so that the system
12187 * can safely hibernate even when there are a large number of CPUs.
12188 * Otherwise hibernation might fail when mapping all the vectors back to CPU0. */
12191 i40e_clear_interrupt_scheme(pf);
12197 * i40e_resume - PM callback for waking up from D3
12198 * @dev: generic device information structure
12200 static int i40e_resume(struct device *dev)
12202 struct pci_dev *pdev = to_pci_dev(dev);
12203 struct i40e_pf *pf = pci_get_drvdata(pdev);
12206 /* If we're not suspended, then there is nothing to do */
12207 if (!test_bit(__I40E_SUSPENDED, pf->state))
12210 /* We cleared the interrupt scheme when we suspended, so we need to
12211 * restore it now to resume device functionality.
12213 err = i40e_restore_interrupt_scheme(pf);
12215 dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
12219 clear_bit(__I40E_DOWN, pf->state);
12220 i40e_reset_and_rebuild(pf, false, false);
12222 /* Clear suspended state last after everything is recovered */
12223 clear_bit(__I40E_SUSPENDED, pf->state);
12225 /* Restart the service task */
12226 mod_timer(&pf->service_timer,
12227 round_jiffies(jiffies + pf->service_timer_period));
12232 #endif /* CONFIG_PM */
12234 static const struct pci_error_handlers i40e_err_handler = {
12235 .error_detected = i40e_pci_error_detected,
12236 .slot_reset = i40e_pci_error_slot_reset,
12237 .resume = i40e_pci_error_resume,
12240 static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
12242 static struct pci_driver i40e_driver = {
12243 .name = i40e_driver_name,
12244 .id_table = i40e_pci_tbl,
12245 .probe = i40e_probe,
12246 .remove = i40e_remove,
12249 .pm = &i40e_pm_ops,
12251 #endif /* CONFIG_PM */
12252 .shutdown = i40e_shutdown,
12253 .err_handler = &i40e_err_handler,
12254 .sriov_configure = i40e_pci_sriov_configure,
12258 * i40e_init_module - Driver registration routine
12260 * i40e_init_module is the first routine called when the driver is
12261 * loaded. All it does is register with the PCI subsystem.
12263 static int __init i40e_init_module(void)
12265 pr_info("%s: %s - version %s\n", i40e_driver_name,
12266 i40e_driver_string, i40e_driver_version_str);
12267 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
12269 /* There is no need to throttle the number of active tasks because
12270 * each device limits its own task using a state bit for scheduling
12271 * the service task, and the device tasks do not interfere with each
12272 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
12273 * since we need to be able to guarantee forward progress even under memory pressure. */
12276 i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
12278 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
12283 return pci_register_driver(&i40e_driver);
12285 module_init(i40e_init_module);
12288 * i40e_exit_module - Driver exit cleanup routine
12290 * i40e_exit_module is called just before the driver is removed from memory. **/
12293 static void __exit i40e_exit_module(void)
12295 pci_unregister_driver(&i40e_driver);
12296 destroy_workqueue(i40e_wq);
12299 module_exit(i40e_exit_module);