// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <generated/utsrelease.h>
#include "i40e_diag.h"
#include <net/udp_tunnel.h>
#include <net/xdp_sock_drv.h>
/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
	"Intel(R) Ethernet Connection XL710 Network Driver";

static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";

/* forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
static bool i40e_check_recovery_mode(struct i40e_pf *pf);
static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);
static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
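/* Usage note (editor's sketch, not from the original file): the debug
 * parameter is read at module load time, e.g. "modprobe i40e debug=16"
 * for maximum verbosity, or "modprobe i40e debug=0x80000020" to pass a
 * raw debug mask (high bit set selects mask mode, per the
 * MODULE_PARM_DESC above). With permissions 0 the parameter is not
 * exposed under /sys/module/i40e/parameters after load.
 */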
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");

static struct workqueue_struct *i40e_wq;

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%s needed=%d id=0x%04x\n",
			 pile ? "<valid>" : "<null>", needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
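/* Usage sketch (hypothetical caller, mirroring how the driver carves
 * queue and vector ranges out of pf->qp_pile / pf->irq_pile):
 *
 *	int base = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
 *				 vsi->idx);
 *	if (base < 0)
 *		return base;	(pile exhausted or bad parameters)
 *	vsi->base_queue = base;
 *	...
 *	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
 */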
/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	u16 i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}

/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if ((!test_bit(__I40E_DOWN, pf->state) &&
	     !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
	      test_bit(__I40E_RECOVERY_MODE, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i;
	u32 head, val;

	pf->tx_timeout_count++;

	/* with txqueue index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (txqueue ==
			    vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	/* don't kick off another recovery if one is already pending */
	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
		return;

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
		    pf->tx_timeout_recovery_level, txqueue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes += bytes;
}
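/* Note on the fetch/retry loop above: u64_stats_fetch_begin_irq() and
 * u64_stats_fetch_retry_irq() wrap a sequence counter, so the 64-bit
 * packet and byte counters are re-read until a consistent snapshot is
 * seen. On 64-bit kernels this compiles down to plain loads; on 32-bit
 * kernels it protects against torn reads racing the datapath writer.
 */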
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	struct i40e_ring *ring;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		ring = READ_ONCE(vsi->tx_rings[i]);
		if (!ring)
			continue;
		i40e_get_netdev_stats_struct_tx(ring, stats);

		if (i40e_enabled_xdp_vsi(vsi)) {
			ring = READ_ONCE(vsi->xdp_rings[i]);
			if (!ring)
				continue;
			i40e_get_netdev_stats_struct_tx(ring, stats);
		}

		ring = READ_ONCE(vsi->rx_rings[i]);
		if (!ring)
			continue;
		do {
			start   = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes   = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			memset(&pf->veb[i]->tc_stats, 0,
			       sizeof(pf->veb[i]->tc_stats));
			memset(&pf->veb[i]->tc_stats_offsets, 0,
			       sizeof(pf->veb[i]->tc_stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero. In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
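/* Worked example of the roll-over handling above (illustrative values):
 * with *offset = 0xFFFFFFFFF000 and a wrapped reading new_data = 0x100,
 * new_data < *offset, so
 * *stat = (0x100 + BIT_ULL(48)) - 0xFFFFFFFFF000 = 0x1100,
 * which is the true delta; the final mask keeps *stat within 48 bits.
 */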
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);

	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs. This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications. We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = READ_ONCE(vsi->tx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;

		/* locate Rx ring */
		p = READ_ONCE(vsi->rx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;

		if (i40e_enabled_xdp_vsi(vsi)) {
			/* locate XDP ring */
			p = READ_ONCE(vsi->xdp_rings[q]);
			if (!p)
				continue;

			do {
				start = u64_stats_fetch_begin_irq(&p->syncp);
				packets = p->stats.packets;
				bytes = p->stats.bytes;
			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
			tx_b += bytes;
			tx_p += packets;
			tx_restart += p->tx_stats.restart_queue;
			tx_busy += p->tx_stats.tx_busy;
			tx_linearize += p->tx_stats.tx_linearize;
			tx_force_wb += p->tx_stats.tx_force_wb;
		}
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
			&nsd->fd_sb_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}
/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}

/**
 * i40e_count_filters - counts VSI mac filters
 * @vsi: the VSI to be searched
 *
 * Returns count of mac filters
 **/
int i40e_count_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;
	int cnt = 0;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		++cnt;

	return cnt;
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ALL. We could check the table each
	 * time, incurring search cost repeatedly. However, we can notice two
	 * things:
	 *
	 * 1) the only place where we can gain a VLAN filter is in
	 *    i40e_add_filter.
	 *
	 * 2) the only place where filters are actually removed is in
	 *    i40e_sync_filters_subtask.
	 *
	 * Thus, we can simply use a boolean value, has_vlan_filters which we
	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
	 * we have to perform the full search after deleting filters in
	 * i40e_sync_filters_subtask, but we already have to search
	 * filters here and can perform the check at the same time. This
	 * results in avoiding embedding a loop for VLAN mode inside another
	 * loop over all the filters, and should maintain correctness as noted
	 * above.
	 */
	return vsi->has_vlan_filter;
}

/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 **/
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are marked as VLAN=PVID
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
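/* Example of the correction rules above (hypothetical filters): with
 * PVID = 5 assigned, (MAC, vlan = -1) is moved to tmp_del_list and
 * re-added as (MAC, vlan = 5); with no PVID but active VLAN filters,
 * (MAC, vlan = -1) becomes (MAC, vlan = 0) and matches only untagged
 * frames; once the last VLAN filter is gone, (MAC, vlan = 0) becomes
 * (MAC, vlan = -1) again and matches tagged and untagged traffic.
 */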
/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}
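/* Filter state lifecycle, as used above and in __i40e_del_filter() below:
 * NEW and FAILED filters were never committed to firmware, so deleting
 * them frees the entry immediately; an ACTIVE filter is only marked
 * REMOVE and left for the sync task to flush; re-adding a filter still
 * marked REMOVE simply restores it to ACTIVE, avoiding a needless admin
 * queue round trip.
 */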
/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}

/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}

/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}

/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	lockdep_assert_held(&vsi->mac_filter_hash_lock);
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* Copy the address first, so that we avoid a possible race with
	 * .set_rx_mode().
	 * - Remove old address from MAC filter
	 * - Copy new address
	 * - Add new address to MAC filter
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	i40e_add_mac_filter(vsi, netdev->dev_addr);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(pf);
	return 0;
}

/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}

/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
		return 0;
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use the user configured hash keys and lookup table if there is one,
	 * otherwise use default
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);
	return ret;
}
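/* Note: the default table built by i40e_fill_rss_lut() (defined elsewhere
 * in the driver) is simply lut[i] = i % vsi->rss_size, spreading RSS hash
 * buckets round-robin across the active queues.
 */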
/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured,
 * @ctxt: VSI context structure
 * @enabled_tc: number of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 **/
static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
					   struct i40e_vsi_context *ctxt,
					   u8 enabled_tc)
{
	u16 qcount = 0, max_qcount, qmap, sections = 0;
	int i, override_q, pow, num_qps, ret;
	u8 netdev_tc = 0, offset = 0;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	num_qps = vsi->mqprio_qopt.qopt.count[0];

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(num_qps);
	if (!is_power_of_2(num_qps))
		pow++;
	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
	       (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
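	/* Illustrative example: count[0] = 6 queue pairs gives
	 * pow = ilog2(6) = 2, bumped to 3 since 6 is not a power of two,
	 * so the TC0 qmap advertises a region of 2^3 = 8 queues starting
	 * at queue offset 0.
	 */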
	/* Setup queue offset/count for all TCs for given VSI */
	max_qcount = vsi->mqprio_qopt.qopt.count[0];
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount = vsi->mqprio_qopt.qopt.count[i];
			if (qcount > max_qcount)
				max_qcount = qcount;
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;
			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;
		}
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset + qcount;

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);

	/* Reconfigure RSS for main VSI with max queue count */
	vsi->rss_size = max_qcount;
	ret = i40e_vsi_config_rss(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to reconfig rss for num_queues (%u)\n",
			 max_qcount);
		return ret;
	}
	vsi->reconfig_rss = true;
	dev_dbg(&vsi->back->pdev->dev,
		"Reconfigured rss with num_queues (%u)\n", max_qcount);

	/* Find queue count available for channel VSIs and starting offset
	 * for them
	 */
	override_q = vsi->mqprio_qopt.qopt.count[0];
	if (override_q && override_q < vsi->num_queue_pairs) {
		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
		vsi->next_base_queue = override_q;
	}

	return 0;
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 1;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	/* Number of queues per enabled TC */
	num_tc_qps = vsi->alloc_queue_pairs;
	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
		num_tc_qps = num_tc_qps / numtc;
		num_tc_qps = min_t(int, num_tc_qps,
				   i40e_pf_get_max_q_per_tc(pf));
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

	/* Do not allow use more TC queue pairs than MSI-X vectors exist */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
				    I40E_FLAG_FD_ATR_ENABLED)) ||
				    vsi->tc_config.enabled_tc != 1) {
					qcount = min_t(int, pf->alloc_rss_size,
						       num_tc_qps);
					break;
				}
				fallthrough;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
1909 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1910 * @netdev: the netdevice
1911 * @addr: address to add
1913 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1914 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1916 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1918 struct i40e_netdev_priv *np = netdev_priv(netdev);
1919 struct i40e_vsi *vsi = np->vsi;
1921 if (i40e_add_mac_filter(vsi, addr))
1928 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1929 * @netdev: the netdevice
1930 * @addr: address to add
1932 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1933 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1935 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1937 struct i40e_netdev_priv *np = netdev_priv(netdev);
1938 struct i40e_vsi *vsi = np->vsi;
1940 /* Under some circumstances, we might receive a request to delete
1941 * our own device address from our uc list. Because we store the
1942 * device address in the VSI's MAC/VLAN filter list, we need to ignore
1943 * such requests and not delete our device address from this list.
1945 if (ether_addr_equal(addr, netdev->dev_addr))
1948 i40e_del_mac_filter(vsi, addr);
1954 * i40e_set_rx_mode - NDO callback to set the netdev filters
1955 * @netdev: network interface device structure
1957 static void i40e_set_rx_mode(struct net_device *netdev)
1959 struct i40e_netdev_priv *np = netdev_priv(netdev);
1960 struct i40e_vsi *vsi = np->vsi;
1962 spin_lock_bh(&vsi->mac_filter_hash_lock);
1964 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1965 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1967 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1969 /* check for other flag changes */
1970 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1971 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1972 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1977 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1978 * @vsi: Pointer to VSI struct
1979 * @from: Pointer to list which contains MAC filter entries - changes to
1980 * those entries need to be undone.
1982 * MAC filter entries from this list were slated for deletion.
1984 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1985 struct hlist_head *from)
1987 struct i40e_mac_filter *f;
1988 struct hlist_node *h;
1990 hlist_for_each_entry_safe(f, h, from, hlist) {
1991 u64 key = i40e_addr_to_hkey(f->macaddr);
1993 /* Move the element back into MAC filter list */
1994 hlist_del(&f->hlist);
1995 hash_add(vsi->mac_filter_hash, &f->hlist, key);
2000 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
2001 * @vsi: Pointer to vsi struct
2002 * @from: Pointer to list which contains MAC filter entries - changes to
2003 * those entries need to be undone.
2005 * MAC filter entries from this list were slated for addition.
2007 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
2008 struct hlist_head *from)
2010 struct i40e_new_mac_filter *new;
2011 struct hlist_node *h;
2013 hlist_for_each_entry_safe(new, h, from, hlist) {
2014 /* We can simply free the wrapper structure */
2015 hlist_del(&new->hlist);
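/*
 * Editorial sketch of the pattern the undo helpers above support: entries
 * are moved from the hash table onto a private hlist while holding the
 * lock, processed with the lock dropped, and moved back by the undo helper
 * only if something fails. The function name is hypothetical.
 */
static void __maybe_unused example_collect_removals(struct i40e_vsi *vsi,
						    struct hlist_head *tmp)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE) {
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp);
		}
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* process @tmp without the lock held; on error, call
	 * i40e_undo_del_filter_entries(vsi, tmp) to restore the hash
	 */
}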
2021 * i40e_next_filter - Get the next non-broadcast filter from a list
2022 * @next: pointer to filter in list
2024 * Returns the next non-broadcast filter in the list. Required so that we
2025 * ignore broadcast filters within the list, since these are not handled via
2026 * the normal firmware update path.
2029 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2031 hlist_for_each_entry_continue(next, hlist) {
2032 if (!is_broadcast_ether_addr(next->f->macaddr))
2040 * i40e_update_filter_state - Update filter state based on return data
2042 * @count: Number of filters added
2043 * @add_list: return data from fw
2044 * @add_head: pointer to first filter in current batch
2046 * MAC filter entries from list were slated to be added to device. Returns
2047 * number of successful filters. Note that 0 does NOT mean success!
2050 i40e_update_filter_state(int count,
2051 struct i40e_aqc_add_macvlan_element_data *add_list,
2052 struct i40e_new_mac_filter *add_head)
2057 for (i = 0; i < count; i++) {
2058 /* Always check status of each filter. We don't need to check
2059 * the firmware return status because we pre-set the filter
2060 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
2061 * request to the adminq. Thus, if it no longer matches then
2062 * we know the filter is active.
2064 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2065 add_head->state = I40E_FILTER_FAILED;
2067 add_head->state = I40E_FILTER_ACTIVE;
2071 add_head = i40e_next_filter(add_head);
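/*
 * Editorial sketch: the "pre-set then compare" trick checked above. The
 * driver writes a sentinel into match_method before the AdminQ request;
 * firmware overwrites it only for filters it accepted, so any element still
 * holding the sentinel failed. The helper name is hypothetical.
 */
static bool __maybe_unused
example_filter_was_added(struct i40e_aqc_add_macvlan_element_data *el)
{
	return el->match_method != I40E_AQC_MM_ERR_NO_RES;
}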
2080 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2081 * @vsi: ptr to the VSI
2082 * @vsi_name: name to display in messages
2083 * @list: the list of filters to send to firmware
2084 * @num_del: the number of filters to delete
2085 * @retval: Set to -EIO on failure to delete
2087 * Send a request to firmware via AdminQ to delete a set of filters. Uses
2088 * *retval instead of a return value so that success does not force *retval to
2089 * be set to 0. This ensures that a sequence of calls to this function
2090 * preserves the previous value of *retval on successful delete.
2093 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2094 struct i40e_aqc_remove_macvlan_element_data *list,
2095 int num_del, int *retval)
2097 struct i40e_hw *hw = &vsi->back->hw;
2101 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
2102 aq_err = hw->aq.asq_last_status;
2104 /* Explicitly ignore and do not report when firmware returns ENOENT */
2105 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
2107 dev_info(&vsi->back->pdev->dev,
2108 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2109 vsi_name, i40e_stat_str(hw, aq_ret),
2110 i40e_aq_str(hw, aq_err));
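/*
 * Editorial usage sketch for the *retval convention above: because the
 * function writes *retval only on failure, batched deletes can be chained
 * and the first error survives to the end. All names here are hypothetical.
 */
static int __maybe_unused
example_del_two_batches(struct i40e_vsi *vsi, const char *vsi_name,
			struct i40e_aqc_remove_macvlan_element_data *batch_a,
			int num_a,
			struct i40e_aqc_remove_macvlan_element_data *batch_b,
			int num_b)
{
	int retval = 0;

	i40e_aqc_del_filters(vsi, vsi_name, batch_a, num_a, &retval);
	i40e_aqc_del_filters(vsi, vsi_name, batch_b, num_b, &retval);

	/* still holds the error from the first failing batch, if any */
	return retval;
}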
2115 * i40e_aqc_add_filters - Request firmware to add a set of filters
2116 * @vsi: ptr to the VSI
2117 * @vsi_name: name to display in messages
2118 * @list: the list of filters to send to firmware
2119 * @add_head: Position in the add hlist
2120 * @num_add: the number of filters to add
2122 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2123 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2124 * space for more filters.
2127 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2128 struct i40e_aqc_add_macvlan_element_data *list,
2129 struct i40e_new_mac_filter *add_head,
2132 struct i40e_hw *hw = &vsi->back->hw;
2135 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
2136 aq_err = hw->aq.asq_last_status;
2137 fcnt = i40e_update_filter_state(num_add, list, add_head);
2139 if (fcnt != num_add) {
2140 if (vsi->type == I40E_VSI_MAIN) {
2141 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2142 dev_warn(&vsi->back->pdev->dev,
2143 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2144 i40e_aq_str(hw, aq_err), vsi_name);
2145 } else if (vsi->type == I40E_VSI_SRIOV ||
2146 vsi->type == I40E_VSI_VMDQ1 ||
2147 vsi->type == I40E_VSI_VMDQ2) {
2148 dev_warn(&vsi->back->pdev->dev,
2149 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2150 i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
2152 dev_warn(&vsi->back->pdev->dev,
2153 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2154 i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
2160 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2161 * @vsi: pointer to the VSI
2162 * @vsi_name: the VSI name
2163 * @f: filter entry
2165 * This function sets or clears the promiscuous broadcast flags for VLAN
2166 * filters in order to properly receive broadcast frames. Assumes that only
2167 * broadcast filters are passed.
2169 * Returns status indicating success or failure.
2172 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2173 struct i40e_mac_filter *f)
2175 bool enable = f->state == I40E_FILTER_NEW;
2176 struct i40e_hw *hw = &vsi->back->hw;
2179 if (f->vlan == I40E_VLAN_ANY) {
2180 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2185 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2193 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2194 dev_warn(&vsi->back->pdev->dev,
2195 "Error %s, forcing overflow promiscuous on %s\n",
2196 i40e_aq_str(hw, hw->aq.asq_last_status),
2204 * i40e_set_promiscuous - set promiscuous mode
2205 * @pf: board private structure
2206 * @promisc: promisc on or off
2208 * There are different ways of setting promiscuous mode on a PF depending on
2209 * what state/environment we're in. This identifies and sets it appropriately.
2210 * Returns 0 on success.
2212 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2214 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2215 struct i40e_hw *hw = &pf->hw;
2218 if (vsi->type == I40E_VSI_MAIN &&
2219 pf->lan_veb != I40E_NO_VEB &&
2220 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2221 /* set defport ON for Main VSI instead of true promisc
2222 * this way we will get all unicast/multicast and VLAN
2223 * promisc behavior but will not get VF or VMDq traffic
2224 * replicated on the Main VSI.
2227 aq_ret = i40e_aq_set_default_vsi(hw,
2231 aq_ret = i40e_aq_clear_default_vsi(hw,
2235 dev_info(&pf->pdev->dev,
2236 "Set default VSI failed, err %s, aq_err %s\n",
2237 i40e_stat_str(hw, aq_ret),
2238 i40e_aq_str(hw, hw->aq.asq_last_status));
2241 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2247 dev_info(&pf->pdev->dev,
2248 "set unicast promisc failed, err %s, aq_err %s\n",
2249 i40e_stat_str(hw, aq_ret),
2250 i40e_aq_str(hw, hw->aq.asq_last_status));
2252 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2257 dev_info(&pf->pdev->dev,
2258 "set multicast promisc failed, err %s, aq_err %s\n",
2259 i40e_stat_str(hw, aq_ret),
2260 i40e_aq_str(hw, hw->aq.asq_last_status));
2265 pf->cur_promisc = promisc;
2271 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2272 * @vsi: ptr to the VSI
2274 * Push any outstanding VSI filter changes through the AdminQ.
2276 * Returns 0 or error value
2278 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2280 struct hlist_head tmp_add_list, tmp_del_list;
2281 struct i40e_mac_filter *f;
2282 struct i40e_new_mac_filter *new, *add_head = NULL;
2283 struct i40e_hw *hw = &vsi->back->hw;
2284 bool old_overflow, new_overflow;
2285 unsigned int failed_filters = 0;
2286 unsigned int vlan_filters = 0;
2287 char vsi_name[16] = "PF";
2288 int filter_list_len = 0;
2289 i40e_status aq_ret = 0;
2290 u32 changed_flags = 0;
2291 struct hlist_node *h;
2300 /* empty array typed pointers, kcalloc later */
2301 struct i40e_aqc_add_macvlan_element_data *add_list;
2302 struct i40e_aqc_remove_macvlan_element_data *del_list;
2304 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2305 usleep_range(1000, 2000);
2308 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2311 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2312 vsi->current_netdev_flags = vsi->netdev->flags;
2315 INIT_HLIST_HEAD(&tmp_add_list);
2316 INIT_HLIST_HEAD(&tmp_del_list);
2318 if (vsi->type == I40E_VSI_SRIOV)
2319 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2320 else if (vsi->type != I40E_VSI_MAIN)
2321 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2323 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2324 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2326 spin_lock_bh(&vsi->mac_filter_hash_lock);
2327 /* Create a list of filters to delete. */
2328 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2329 if (f->state == I40E_FILTER_REMOVE) {
2330 /* Move the element into temporary del_list */
2331 hash_del(&f->hlist);
2332 hlist_add_head(&f->hlist, &tmp_del_list);
2334 /* Avoid counting removed filters */
2337 if (f->state == I40E_FILTER_NEW) {
2338 /* Create a temporary i40e_new_mac_filter */
2339 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2341 goto err_no_memory_locked;
2343 /* Store pointer to the real filter */
2345 new->state = f->state;
2347 /* Add it to the hash list */
2348 hlist_add_head(&new->hlist, &tmp_add_list);
2351 /* Count the number of active (current and new) VLAN
2352 * filters we have now. Does not count filters which
2353 * are marked for deletion.
2359 retval = i40e_correct_mac_vlan_filters(vsi,
2364 goto err_no_memory_locked;
2366 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2369 /* Now process 'del_list' outside the lock */
2370 if (!hlist_empty(&tmp_del_list)) {
2371 filter_list_len = hw->aq.asq_buf_size /
2372 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2373 list_size = filter_list_len *
2374 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2375 del_list = kzalloc(list_size, GFP_ATOMIC);
2379 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2382 /* handle broadcast filters by updating the broadcast
2383 * promiscuous flag and releasing the filter entry.
2385 if (is_broadcast_ether_addr(f->macaddr)) {
2386 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2388 hlist_del(&f->hlist);
2393 /* add to delete list */
2394 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2395 if (f->vlan == I40E_VLAN_ANY) {
2396 del_list[num_del].vlan_tag = 0;
2397 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2399 del_list[num_del].vlan_tag =
2400 cpu_to_le16((u16)(f->vlan));
2403 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2404 del_list[num_del].flags = cmd_flags;
2407 /* flush a full buffer */
2408 if (num_del == filter_list_len) {
2409 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2411 memset(del_list, 0, list_size);
2414 /* Release memory for MAC filter entries which were
2415 * synced up with HW.
2417 hlist_del(&f->hlist);
2422 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2430 if (!hlist_empty(&tmp_add_list)) {
2431 /* Do all the adds now. */
2432 filter_list_len = hw->aq.asq_buf_size /
2433 sizeof(struct i40e_aqc_add_macvlan_element_data);
2434 list_size = filter_list_len *
2435 sizeof(struct i40e_aqc_add_macvlan_element_data);
2436 add_list = kzalloc(list_size, GFP_ATOMIC);
2441 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2442 /* handle broadcast filters by updating the broadcast
2443 * promiscuous flag instead of adding a MAC filter.
2445 if (is_broadcast_ether_addr(new->f->macaddr)) {
2446 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2448 new->state = I40E_FILTER_FAILED;
2450 new->state = I40E_FILTER_ACTIVE;
2454 /* add to add array */
2458 ether_addr_copy(add_list[num_add].mac_addr,
2460 if (new->f->vlan == I40E_VLAN_ANY) {
2461 add_list[num_add].vlan_tag = 0;
2462 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2464 add_list[num_add].vlan_tag =
2465 cpu_to_le16((u16)(new->f->vlan));
2467 add_list[num_add].queue_number = 0;
2468 /* set invalid match method for later detection */
2469 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2470 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2471 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2474 /* flush a full buffer */
2475 if (num_add == filter_list_len) {
2476 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2478 memset(add_list, 0, list_size);
2483 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2486 /* Now move all of the filters from the temp add list back to the VSI's list.
2489 spin_lock_bh(&vsi->mac_filter_hash_lock);
2490 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2491 /* Only update the state if we're still NEW */
2492 if (new->f->state == I40E_FILTER_NEW)
2493 new->f->state = new->state;
2494 hlist_del(&new->hlist);
2497 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2502 /* Determine the number of active and failed filters. */
2503 spin_lock_bh(&vsi->mac_filter_hash_lock);
2504 vsi->active_filters = 0;
2505 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2506 if (f->state == I40E_FILTER_ACTIVE)
2507 vsi->active_filters++;
2508 else if (f->state == I40E_FILTER_FAILED)
2511 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2513 /* Check if we are able to exit overflow promiscuous mode. We can
2514 * safely exit if we didn't just enter, we no longer have any failed
2515 * filters, and we have reduced filters below the threshold value.
2517 if (old_overflow && !failed_filters &&
2518 vsi->active_filters < vsi->promisc_threshold) {
2519 dev_info(&pf->pdev->dev,
2520 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2522 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2523 vsi->promisc_threshold = 0;
2526 /* if the VF is not trusted do not do promisc */
2527 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2528 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2532 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2534 /* If we are entering overflow promiscuous, we need to calculate a new
2535 * threshold for when we are safe to exit
2537 if (!old_overflow && new_overflow)
2538 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
2540 /* check for changes in promiscuous modes */
2541 if (changed_flags & IFF_ALLMULTI) {
2542 bool cur_multipromisc;
2544 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2545 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2550 retval = i40e_aq_rc_to_posix(aq_ret,
2551 hw->aq.asq_last_status);
2552 dev_info(&pf->pdev->dev,
2553 "set multi promisc failed on %s, err %s aq_err %s\n",
2555 i40e_stat_str(hw, aq_ret),
2556 i40e_aq_str(hw, hw->aq.asq_last_status));
2558 dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
2560 cur_multipromisc ? "entering" : "leaving");
2564 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2567 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2569 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2571 retval = i40e_aq_rc_to_posix(aq_ret,
2572 hw->aq.asq_last_status);
2573 dev_info(&pf->pdev->dev,
2574 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2575 cur_promisc ? "on" : "off",
2577 i40e_stat_str(hw, aq_ret),
2578 i40e_aq_str(hw, hw->aq.asq_last_status));
2582 /* if something went wrong then set the changed flag so we try again */
2584 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2586 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2590 /* Restore elements on the temporary add and delete lists */
2591 spin_lock_bh(&vsi->mac_filter_hash_lock);
2592 err_no_memory_locked:
2593 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2594 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2595 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2597 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2598 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
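/*
 * Editorial sketch: __I40E_VSI_SYNCING_FILTERS acts as a sleeping lock
 * around the whole sync above. test_and_set_bit() either takes ownership or
 * keeps backing off until the current owner clears the bit.
 */
static void __maybe_unused example_sync_serialize(struct i40e_vsi *vsi)
{
	while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
		usleep_range(1000, 2000);	/* another sync in flight */

	/* ... critical section: walk and program the filter hash ... */

	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
}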
2603 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2604 * @pf: board private structure
2606 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2612 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2614 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
2615 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2619 for (v = 0; v < pf->num_alloc_vsi; v++) {
2621 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2622 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2625 /* come back and try again later */
2626 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2632 clear_bit(__I40E_VF_DISABLE, pf->state);
2636 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2637 * @vsi: the vsi
2639 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2641 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2642 return I40E_RXBUFFER_2048;
2644 return I40E_RXBUFFER_3072;
2648 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2649 * @netdev: network interface device structure
2650 * @new_mtu: new value for maximum frame size
2652 * Returns 0 on success, negative on failure
2654 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2656 struct i40e_netdev_priv *np = netdev_priv(netdev);
2657 struct i40e_vsi *vsi = np->vsi;
2658 struct i40e_pf *pf = vsi->back;
2660 if (i40e_enabled_xdp_vsi(vsi)) {
2661 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2663 if (frame_size > i40e_max_xdp_frame_size(vsi))
2667 netdev_dbg(netdev, "changing MTU from %d to %d\n",
2668 netdev->mtu, new_mtu);
2669 netdev->mtu = new_mtu;
2670 if (netif_running(netdev))
2671 i40e_vsi_reinit_locked(vsi);
2672 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2673 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
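/*
 * Editorial worked example for the XDP bound above: with a 2048-byte Rx
 * buffer, the largest MTU that fits is
 *	2048 - ETH_HLEN (14) - ETH_FCS_LEN (4) - VLAN_HLEN (4) = 2026 bytes.
 * The hypothetical helper below restates that bound.
 */
static int __maybe_unused example_max_xdp_mtu(struct i40e_vsi *vsi)
{
	return i40e_max_xdp_frame_size(vsi) - ETH_HLEN - ETH_FCS_LEN -
	       VLAN_HLEN;
}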
2678 * i40e_ioctl - Access the hwtstamp interface
2679 * @netdev: network interface device structure
2680 * @ifr: interface request data
2681 * @cmd: ioctl command
2683 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2685 struct i40e_netdev_priv *np = netdev_priv(netdev);
2686 struct i40e_pf *pf = np->vsi->back;
2690 return i40e_ptp_get_ts_config(pf, ifr);
2692 return i40e_ptp_set_ts_config(pf, ifr);
2699 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2700 * @vsi: the vsi being adjusted
2702 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2704 struct i40e_vsi_context ctxt;
2707 /* Don't modify stripping options if a port VLAN is active */
2711 if ((vsi->info.valid_sections &
2712 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2713 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2714 return; /* already enabled */
2716 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2717 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2718 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2720 ctxt.seid = vsi->seid;
2721 ctxt.info = vsi->info;
2722 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2724 dev_info(&vsi->back->pdev->dev,
2725 "update vlan stripping failed, err %s aq_err %s\n",
2726 i40e_stat_str(&vsi->back->hw, ret),
2727 i40e_aq_str(&vsi->back->hw,
2728 vsi->back->hw.aq.asq_last_status));
2733 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2734 * @vsi: the vsi being adjusted
2736 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2738 struct i40e_vsi_context ctxt;
2741 /* Don't modify stripping options if a port VLAN is active */
2745 if ((vsi->info.valid_sections &
2746 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2747 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2748 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2749 return; /* already disabled */
2751 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2752 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2753 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2755 ctxt.seid = vsi->seid;
2756 ctxt.info = vsi->info;
2757 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2759 dev_info(&vsi->back->pdev->dev,
2760 "update vlan stripping failed, err %s aq_err %s\n",
2761 i40e_stat_str(&vsi->back->hw, ret),
2762 i40e_aq_str(&vsi->back->hw,
2763 vsi->back->hw.aq.asq_last_status));
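/*
 * Editorial sketch: the enable and disable paths above differ only in the
 * EMOD bits of port_vlan_flags. A hypothetical combined helper makes the
 * contrast explicit (the AdminQ update step is elided).
 */
static void __maybe_unused example_vlan_stripping_set(struct i40e_vsi *vsi,
						      bool enable)
{
	u8 emod = enable ? I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH :
			   I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | emod;
	/* ...then i40e_aq_update_vsi_params(), exactly as in both paths */
}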
2768 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2769 * @vsi: the vsi being configured
2770 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2772 * This is a helper function for adding a new MAC/VLAN filter with the
2773 * specified VLAN for each existing MAC address already in the hash table.
2774 * This function does *not* perform any accounting to update filters based on VLAN mode.
2777 * NOTE: this function expects to be called while under the
2778 * mac_filter_hash_lock
2780 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2782 struct i40e_mac_filter *f, *add_f;
2783 struct hlist_node *h;
2786 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2787 if (f->state == I40E_FILTER_REMOVE)
2789 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2791 dev_info(&vsi->back->pdev->dev,
2792 "Could not add vlan filter %d for %pM\n",
2802 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2803 * @vsi: the VSI being configured
2804 * @vid: VLAN id to be added
2806 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2813 /* The network stack will attempt to add VID=0, with the intention to
2814 * receive priority tagged packets with a VLAN of 0. Our HW receives
2815 * these packets by default when configured to receive untagged
2816 * packets, so we don't need to add a filter for this case.
2817 * Additionally, HW interprets adding a VID=0 filter as meaning to
2818 * receive *only* tagged traffic and stops receiving untagged traffic.
2819 * Thus, we do not want to actually add a filter for VID=0
2824 /* Locked once because all functions invoked below iterate the list */
2825 spin_lock_bh(&vsi->mac_filter_hash_lock);
2826 err = i40e_add_vlan_all_mac(vsi, vid);
2827 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2831 /* schedule our worker thread which will take care of
2832 * applying the new filter changes
2834 i40e_service_event_schedule(vsi->back);
2839 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2840 * @vsi: the vsi being configured
2841 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2843 * This function should be used to remove all VLAN filters which match the
2844 * given VID. It does not schedule the service event and does not take the
2845 * mac_filter_hash_lock so it may be combined with other operations under
2846 * a single invocation of the mac_filter_hash_lock.
2848 * NOTE: this function expects to be called while under the
2849 * mac_filter_hash_lock
2851 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2853 struct i40e_mac_filter *f;
2854 struct hlist_node *h;
2857 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2859 __i40e_del_filter(vsi, f);
2864 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2865 * @vsi: the VSI being configured
2866 * @vid: VLAN id to be removed
2868 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2870 if (!vid || vsi->info.pvid)
2873 spin_lock_bh(&vsi->mac_filter_hash_lock);
2874 i40e_rm_vlan_all_mac(vsi, vid);
2875 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2877 /* schedule our worker thread which will take care of
2878 * applying the new filter changes
2880 i40e_service_event_schedule(vsi->back);
2884 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2885 * @netdev: network interface to be adjusted
2886 * @proto: unused protocol value
2887 * @vid: vlan id to be added
2889 * net_device_ops implementation for adding vlan ids
2891 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2892 __always_unused __be16 proto, u16 vid)
2894 struct i40e_netdev_priv *np = netdev_priv(netdev);
2895 struct i40e_vsi *vsi = np->vsi;
2898 if (vid >= VLAN_N_VID)
2901 ret = i40e_vsi_add_vlan(vsi, vid);
2903 set_bit(vid, vsi->active_vlans);
2909 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
2910 * @netdev: network interface to be adjusted
2911 * @proto: unused protocol value
2912 * @vid: vlan id to be added
2914 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2915 __always_unused __be16 proto, u16 vid)
2917 struct i40e_netdev_priv *np = netdev_priv(netdev);
2918 struct i40e_vsi *vsi = np->vsi;
2920 if (vid >= VLAN_N_VID)
2922 set_bit(vid, vsi->active_vlans);
2926 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2927 * @netdev: network interface to be adjusted
2928 * @proto: unused protocol value
2929 * @vid: vlan id to be removed
2931 * net_device_ops implementation for removing vlan ids
2933 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2934 __always_unused __be16 proto, u16 vid)
2936 struct i40e_netdev_priv *np = netdev_priv(netdev);
2937 struct i40e_vsi *vsi = np->vsi;
2939 /* return code is ignored as there is nothing a user
2940 * can do about failure to remove and a log message was
2941 * already printed from the other function
2943 i40e_vsi_kill_vlan(vsi, vid);
2945 clear_bit(vid, vsi->active_vlans);
2951 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2952 * @vsi: the vsi being brought back up
2954 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2961 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2962 i40e_vlan_stripping_enable(vsi);
2964 i40e_vlan_stripping_disable(vsi);
2966 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2967 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
2972 * i40e_vsi_add_pvid - Add pvid for the VSI
2973 * @vsi: the vsi being adjusted
2974 * @vid: the vlan id to set as a PVID
2976 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2978 struct i40e_vsi_context ctxt;
2981 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2982 vsi->info.pvid = cpu_to_le16(vid);
2983 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2984 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2985 I40E_AQ_VSI_PVLAN_EMOD_STR;
2987 ctxt.seid = vsi->seid;
2988 ctxt.info = vsi->info;
2989 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2991 dev_info(&vsi->back->pdev->dev,
2992 "add pvid failed, err %s aq_err %s\n",
2993 i40e_stat_str(&vsi->back->hw, ret),
2994 i40e_aq_str(&vsi->back->hw,
2995 vsi->back->hw.aq.asq_last_status));
3003 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
3004 * @vsi: the vsi being adjusted
3006 * Just use the vlan_rx_register() service to put it back to normal
3008 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
3012 i40e_vlan_stripping_disable(vsi);
3016 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
3017 * @vsi: ptr to the VSI
3019 * If this function returns with an error, then it's possible one or
3020 * more of the rings is populated (while the rest are not). It is the
3021 * caller's duty to clean those orphaned rings.
3023 * Return 0 on success, negative on failure
3025 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3029 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3030 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3032 if (!i40e_enabled_xdp_vsi(vsi))
3035 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3036 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3042 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3043 * @vsi: ptr to the VSI
3045 * Free VSI's transmit software resources
3047 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3051 if (vsi->tx_rings) {
3052 for (i = 0; i < vsi->num_queue_pairs; i++)
3053 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3054 i40e_free_tx_resources(vsi->tx_rings[i]);
3057 if (vsi->xdp_rings) {
3058 for (i = 0; i < vsi->num_queue_pairs; i++)
3059 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3060 i40e_free_tx_resources(vsi->xdp_rings[i]);
3065 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3066 * @vsi: ptr to the VSI
3068 * If this function returns with an error, then it's possible one or
3069 * more of the rings is populated (while the rest are not). It is the
3070 * caller's duty to clean those orphaned rings.
3072 * Return 0 on success, negative on failure
3074 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3078 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3079 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3084 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3085 * @vsi: ptr to the VSI
3087 * Free all receive software resources
3089 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3096 for (i = 0; i < vsi->num_queue_pairs; i++)
3097 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3098 i40e_free_rx_resources(vsi->rx_rings[i]);
3102 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3103 * @ring: The Tx ring to configure
3105 * This enables/disables XPS for a given Tx descriptor ring
3106 * based on the TCs enabled for the VSI that ring belongs to.
3108 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3112 if (!ring->q_vector || !ring->netdev || ring->ch)
3115 /* We only initialize XPS once, so as not to overwrite user settings */
3116 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3119 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3120 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3125 * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled
3126 * @ring: The Tx or Rx ring
3128 * Returns the AF_XDP buffer pool or NULL.
3130 static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
3132 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3133 int qid = ring->queue_index;
3135 if (ring_is_xdp(ring))
3136 qid -= ring->vsi->alloc_queue_pairs;
3138 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3141 return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
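/*
 * Editorial sketch: XDP Tx rings are stacked behind the regular rings in
 * the VSI, so an XDP ring's queue_index is alloc_queue_pairs + qid. The
 * inverse mapping used above, restated as a hypothetical helper:
 */
static int __maybe_unused example_xsk_qid(struct i40e_ring *ring)
{
	return ring_is_xdp(ring) ?
	       ring->queue_index - ring->vsi->alloc_queue_pairs :
	       ring->queue_index;
}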
3145 * i40e_configure_tx_ring - Configure a transmit ring context
3146 * @ring: The Tx ring to configure
3148 * Configure the Tx descriptor ring in the HMC context.
3150 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3152 struct i40e_vsi *vsi = ring->vsi;
3153 u16 pf_q = vsi->base_queue + ring->queue_index;
3154 struct i40e_hw *hw = &vsi->back->hw;
3155 struct i40e_hmc_obj_txq tx_ctx;
3156 i40e_status err = 0;
3159 if (ring_is_xdp(ring))
3160 ring->xsk_pool = i40e_xsk_pool(ring);
3162 /* some ATR related tx ring init */
3163 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3164 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3165 ring->atr_count = 0;
3167 ring->atr_sample_rate = 0;
3171 i40e_config_xps_tx_ring(ring);
3173 /* clear the context structure first */
3174 memset(&tx_ctx, 0, sizeof(tx_ctx));
3176 tx_ctx.new_context = 1;
3177 tx_ctx.base = (ring->dma / 128);
3178 tx_ctx.qlen = ring->count;
3179 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3180 I40E_FLAG_FD_ATR_ENABLED));
3181 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3182 /* FDIR VSI tx ring can still use RS bit and writebacks */
3183 if (vsi->type != I40E_VSI_FDIR)
3184 tx_ctx.head_wb_ena = 1;
3185 tx_ctx.head_wb_addr = ring->dma +
3186 (ring->count * sizeof(struct i40e_tx_desc));
3188 /* As part of VSI creation/update, FW allocates certain
3189 * Tx arbitration queue sets for each TC enabled for
3190 * the VSI. The FW returns the handles to these queue
3191 * sets as part of the response buffer to Add VSI,
3192 * Update VSI, etc. AQ commands. It is expected that
3193 * these queue set handles be associated with the Tx
3194 * queues by the driver as part of the TX queue context
3195 * initialization. This has to be done regardless of
3196 * DCB as by default everything is mapped to TC0.
3201 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3204 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3206 tx_ctx.rdylist_act = 0;
3208 /* clear the context in the HMC */
3209 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3211 dev_info(&vsi->back->pdev->dev,
3212 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3213 ring->queue_index, pf_q, err);
3217 /* set the context in the HMC */
3218 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3220 dev_info(&vsi->back->pdev->dev,
3221 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3222 ring->queue_index, pf_q, err);
3226 /* Now associate this queue with this PCI function */
3228 if (ring->ch->type == I40E_VSI_VMDQ2)
3229 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3233 qtx_ctl |= (ring->ch->vsi_number <<
3234 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3235 I40E_QTX_CTL_VFVM_INDX_MASK;
3237 if (vsi->type == I40E_VSI_VMDQ2) {
3238 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3239 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3240 I40E_QTX_CTL_VFVM_INDX_MASK;
3242 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3246 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3247 I40E_QTX_CTL_PF_INDX_MASK);
3248 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3251 /* cache tail off for easier writes later */
3252 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
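/*
 * Editorial worked example: the head write-back area configured above sits
 * right after the descriptors in the same DMA region. For a 512-descriptor
 * ring that is ring->dma + 512 * sizeof(struct i40e_tx_desc). Restated:
 */
static u64 __maybe_unused example_head_wb_addr(struct i40e_ring *ring)
{
	return (u64)ring->dma + ring->count * sizeof(struct i40e_tx_desc);
}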
3258 * i40e_configure_rx_ring - Configure a receive ring context
3259 * @ring: The Rx ring to configure
3261 * Configure the Rx descriptor ring in the HMC context.
3263 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3265 struct i40e_vsi *vsi = ring->vsi;
3266 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3267 u16 pf_q = vsi->base_queue + ring->queue_index;
3268 struct i40e_hw *hw = &vsi->back->hw;
3269 struct i40e_hmc_obj_rxq rx_ctx;
3270 i40e_status err = 0;
3274 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3276 /* clear the context structure first */
3277 memset(&rx_ctx, 0, sizeof(rx_ctx));
3279 if (ring->vsi->type == I40E_VSI_MAIN)
3280 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3283 ring->xsk_pool = i40e_xsk_pool(ring);
3284 if (ring->xsk_pool) {
3285 ret = i40e_alloc_rx_bi_zc(ring);
3289 xsk_pool_get_rx_frame_size(ring->xsk_pool);
3290 /* For AF_XDP ZC, we disallow packets to span
3291 * multiple buffers, thus letting us skip that
3292 * handling in the fast-path.
3295 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3296 MEM_TYPE_XSK_BUFF_POOL,
3300 dev_info(&vsi->back->pdev->dev,
3301 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
3305 ret = i40e_alloc_rx_bi(ring);
3308 ring->rx_buf_len = vsi->rx_buf_len;
3309 if (ring->vsi->type == I40E_VSI_MAIN) {
3310 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3311 MEM_TYPE_PAGE_SHARED,
3318 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3319 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3321 rx_ctx.base = (ring->dma / 128);
3322 rx_ctx.qlen = ring->count;
3324 /* use 32 byte descriptors */
3327 /* descriptor type is always zero
3330 rx_ctx.hsplit_0 = 0;
3332 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3333 if (hw->revision_id == 0)
3334 rx_ctx.lrxqthresh = 0;
3336 rx_ctx.lrxqthresh = 1;
3337 rx_ctx.crcstrip = 1;
3339 /* this controls whether VLAN is stripped from inner headers */
3341 /* set the prefena field to 1 because the manual says to */
3344 /* clear the context in the HMC */
3345 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3347 dev_info(&vsi->back->pdev->dev,
3348 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3349 ring->queue_index, pf_q, err);
3353 /* set the context in the HMC */
3354 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3356 dev_info(&vsi->back->pdev->dev,
3357 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3358 ring->queue_index, pf_q, err);
3362 /* configure Rx buffer alignment */
3363 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3364 clear_ring_build_skb_enabled(ring);
3366 set_ring_build_skb_enabled(ring);
3368 /* cache tail for quicker writes, and clear the reg before use */
3369 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3370 writel(0, ring->tail);
3372 if (ring->xsk_pool) {
3373 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
3374 ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
3376 ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3379 /* Log this in case the user has forgotten to give the kernel
3380 * any buffers, even later in the application.
3382 dev_info(&vsi->back->pdev->dev,
3383 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3384 ring->xsk_pool ? "AF_XDP ZC enabled " : "",
3385 ring->queue_index, pf_q);
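/*
 * Editorial worked example: rx_ctx.dbuff above is expressed in 128-byte
 * units (BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT) == 128), so a 2048-byte buffer
 * programs DIV_ROUND_UP(2048, 128) = 16. Restated as a sketch:
 */
static u8 __maybe_unused example_rx_dbuff(u16 rx_buf_len)
{
	return DIV_ROUND_UP(rx_buf_len, BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
}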
3392 * i40e_vsi_configure_tx - Configure the VSI for Tx
3393 * @vsi: VSI structure describing this set of rings and resources
3395 * Configure the Tx VSI for operation.
3397 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3402 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3403 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3405 if (err || !i40e_enabled_xdp_vsi(vsi))
3408 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3409 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3415 * i40e_vsi_configure_rx - Configure the VSI for Rx
3416 * @vsi: the VSI being configured
3418 * Configure the Rx VSI for operation.
3420 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3425 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3426 vsi->max_frame = I40E_MAX_RXBUFFER;
3427 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3428 #if (PAGE_SIZE < 8192)
3429 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3430 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3431 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3432 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3435 vsi->max_frame = I40E_MAX_RXBUFFER;
3436 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3440 /* set up individual rings */
3441 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3442 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3448 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3449 * @vsi: ptr to the VSI
3451 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3453 struct i40e_ring *tx_ring, *rx_ring;
3454 u16 qoffset, qcount;
3457 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3458 /* Reset the TC information */
3459 for (i = 0; i < vsi->num_queue_pairs; i++) {
3460 rx_ring = vsi->rx_rings[i];
3461 tx_ring = vsi->tx_rings[i];
3462 rx_ring->dcb_tc = 0;
3463 tx_ring->dcb_tc = 0;
3468 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3469 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3472 qoffset = vsi->tc_config.tc_info[n].qoffset;
3473 qcount = vsi->tc_config.tc_info[n].qcount;
3474 for (i = qoffset; i < (qoffset + qcount); i++) {
3475 rx_ring = vsi->rx_rings[i];
3476 tx_ring = vsi->tx_rings[i];
3477 rx_ring->dcb_tc = n;
3478 tx_ring->dcb_tc = n;
3484 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3485 * @vsi: ptr to the VSI
3487 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3490 i40e_set_rx_mode(vsi->netdev);
3494 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3495 * @vsi: Pointer to the targeted VSI
3497 * This function replays into the hardware the hlist on which all the
3498 * sideband Flow Director filters were saved.
3500 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3502 struct i40e_fdir_filter *filter;
3503 struct i40e_pf *pf = vsi->back;
3504 struct hlist_node *node;
3506 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3509 /* Reset FDir counters as we're replaying all existing filters */
3510 pf->fd_tcp4_filter_cnt = 0;
3511 pf->fd_udp4_filter_cnt = 0;
3512 pf->fd_sctp4_filter_cnt = 0;
3513 pf->fd_ip4_filter_cnt = 0;
3515 hlist_for_each_entry_safe(filter, node,
3516 &pf->fdir_filter_list, fdir_node) {
3517 i40e_add_del_fdir(vsi, filter, true);
3522 * i40e_vsi_configure - Set up the VSI for action
3523 * @vsi: the VSI being configured
3525 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3529 i40e_set_vsi_rx_mode(vsi);
3530 i40e_restore_vlan(vsi);
3531 i40e_vsi_config_dcb_rings(vsi);
3532 err = i40e_vsi_configure_tx(vsi);
3534 err = i40e_vsi_configure_rx(vsi);
3540 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3541 * @vsi: the VSI being configured
3543 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3545 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3546 struct i40e_pf *pf = vsi->back;
3547 struct i40e_hw *hw = &pf->hw;
3552 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3553 * and PFINT_LNKLSTn registers, e.g.:
3554 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3556 qp = vsi->base_queue;
3557 vector = vsi->base_vector;
3558 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3559 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3561 q_vector->rx.next_update = jiffies + 1;
3562 q_vector->rx.target_itr =
3563 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3564 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3565 q_vector->rx.target_itr >> 1);
3566 q_vector->rx.current_itr = q_vector->rx.target_itr;
3568 q_vector->tx.next_update = jiffies + 1;
3569 q_vector->tx.target_itr =
3570 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3571 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3572 q_vector->tx.target_itr >> 1);
3573 q_vector->tx.current_itr = q_vector->tx.target_itr;
3575 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3576 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3578 /* Linked list for the queue pairs assigned to this vector */
3579 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3580 for (q = 0; q < q_vector->num_ringpairs; q++) {
3581 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3584 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3585 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3586 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3587 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3588 (I40E_QUEUE_TYPE_TX <<
3589 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3591 wr32(hw, I40E_QINT_RQCTL(qp), val);
3594 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3595 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3596 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3597 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3598 (I40E_QUEUE_TYPE_TX <<
3599 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3601 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3604 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3605 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3606 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3607 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3608 (I40E_QUEUE_TYPE_RX <<
3609 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3611 /* Terminate the linked list */
3612 if (q == (q_vector->num_ringpairs - 1))
3613 val |= (I40E_QUEUE_END_OF_LIST <<
3614 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3616 wr32(hw, I40E_QINT_TQCTL(qp), val);
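/*
 * Editorial sketch of the per-vector chain programmed above (XDP case):
 * PFINT_LNKLSTN points at the first Rx queue and each RQCTL/TQCTL NEXTQ
 * field points at the next element, ending in I40E_QUEUE_END_OF_LIST:
 *
 *	LNKLSTN(v) -> Rx qp -> Tx qp+alloc_queue_pairs (XDP) -> Tx qp
 *		   -> Rx qp+1 -> ... -> END_OF_LIST
 *
 * A hypothetical helper showing how the final Tx context is terminated:
 */
static u32 __maybe_unused example_terminate_chain(u32 tqctl)
{
	return tqctl | (I40E_QUEUE_END_OF_LIST <<
			I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
}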
3625 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3626 * @pf: pointer to private device data structure
3628 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3630 struct i40e_hw *hw = &pf->hw;
3633 /* clear things first */
3634 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3635 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3637 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3638 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3639 I40E_PFINT_ICR0_ENA_GRST_MASK |
3640 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3641 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3642 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3643 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3644 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3646 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3647 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3649 if (pf->flags & I40E_FLAG_PTP)
3650 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3652 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3654 /* SW_ITR_IDX = 0, but don't change INTENA */
3655 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3656 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3658 /* OTHER_ITR_IDX = 0 */
3659 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3663 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3664 * @vsi: the VSI being configured
3666 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3668 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3669 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3670 struct i40e_pf *pf = vsi->back;
3671 struct i40e_hw *hw = &pf->hw;
3674 /* set the ITR configuration */
3675 q_vector->rx.next_update = jiffies + 1;
3676 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3677 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
3678 q_vector->rx.current_itr = q_vector->rx.target_itr;
3679 q_vector->tx.next_update = jiffies + 1;
3680 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3681 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
3682 q_vector->tx.current_itr = q_vector->tx.target_itr;
3684 i40e_enable_misc_int_causes(pf);
3686 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3687 wr32(hw, I40E_PFINT_LNKLST0, 0);
3689 /* Associate the queue pair to the vector and enable the queue int */
3690 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3691 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3692 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3693 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3695 wr32(hw, I40E_QINT_RQCTL(0), val);
3697 if (i40e_enabled_xdp_vsi(vsi)) {
3698 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3699 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3701 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3703 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3706 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3707 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3708 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3710 wr32(hw, I40E_QINT_TQCTL(0), val);
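/*
 * Editorial note on the ">> 1" register writes above: the hardware ITR
 * fields count in 2-usec units while the driver tracks ITR in usecs, so the
 * value is halved when written. Sketch, assuming that unit convention:
 */
static u16 __maybe_unused example_itr_usecs_to_reg(u16 itr_usecs)
{
	return itr_usecs >> 1;	/* e.g. 50 usecs -> register value 25 */
}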
3715 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3716 * @pf: board private structure
3718 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3720 struct i40e_hw *hw = &pf->hw;
3722 wr32(hw, I40E_PFINT_DYN_CTL0,
3723 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3728 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3729 * @pf: board private structure
3731 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3733 struct i40e_hw *hw = &pf->hw;
3736 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3737 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3738 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3740 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3745 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3746 * @irq: interrupt number
3747 * @data: pointer to a q_vector
3749 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3751 struct i40e_q_vector *q_vector = data;
3753 if (!q_vector->tx.ring && !q_vector->rx.ring)
3756 napi_schedule_irqoff(&q_vector->napi);
3762 * i40e_irq_affinity_notify - Callback for affinity changes
3763 * @notify: context as to what irq was changed
3764 * @mask: the new affinity mask
3766 * This is a callback function used by the irq_set_affinity_notifier function
3767 * so that we may register to receive changes to the irq affinity masks.
3769 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3770 const cpumask_t *mask)
3772 struct i40e_q_vector *q_vector =
3773 container_of(notify, struct i40e_q_vector, affinity_notify);
3775 cpumask_copy(&q_vector->affinity_mask, mask);
3779 * i40e_irq_affinity_release - Callback for affinity notifier release
3780 * @ref: internal core kernel usage
3782 * This is a callback function used by the irq_set_affinity_notifier function
3783 * to inform the current notification subscriber that they will no longer
3784 * receive notifications.
3786 static void i40e_irq_affinity_release(struct kref *ref) {}
3789 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3790 * @vsi: the VSI being configured
3791 * @basename: name for the vector
3793 * Allocates MSI-X vectors and requests interrupts from the kernel.
3795 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3797 int q_vectors = vsi->num_q_vectors;
3798 struct i40e_pf *pf = vsi->back;
3799 int base = vsi->base_vector;
3806 for (vector = 0; vector < q_vectors; vector++) {
3807 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3809 irq_num = pf->msix_entries[base + vector].vector;
3811 if (q_vector->tx.ring && q_vector->rx.ring) {
3812 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3813 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3815 } else if (q_vector->rx.ring) {
3816 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3817 "%s-%s-%d", basename, "rx", rx_int_idx++);
3818 } else if (q_vector->tx.ring) {
3819 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3820 "%s-%s-%d", basename, "tx", tx_int_idx++);
3822 /* skip this unused q_vector */
3825 err = request_irq(irq_num,
3831 dev_info(&pf->pdev->dev,
3832 "MSIX request_irq failed, error: %d\n", err);
3833 goto free_queue_irqs;
3836 /* register for affinity change notifications */
3837 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3838 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3839 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3840 /* Spread affinity hints out across online CPUs.
3842 * get_cpu_mask returns a static constant mask with
3843 * a permanent lifetime so it's ok to pass to
3844 * irq_set_affinity_hint without making a copy.
3846 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3847 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3850 vsi->irqs_ready = true;
3856 irq_num = pf->msix_entries[base + vector].vector;
3857 irq_set_affinity_notifier(irq_num, NULL);
3858 irq_set_affinity_hint(irq_num, NULL);
3859 free_irq(irq_num, &vsi->q_vectors[vector]);
3865 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3866 * @vsi: the VSI being un-configured
3868 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3870 struct i40e_pf *pf = vsi->back;
3871 struct i40e_hw *hw = &pf->hw;
3872 int base = vsi->base_vector;
3875 /* disable interrupt causation from each queue */
3876 for (i = 0; i < vsi->num_queue_pairs; i++) {
3879 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3880 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3881 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3883 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3884 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3885 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3887 if (!i40e_enabled_xdp_vsi(vsi))
3889 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3892 /* disable each interrupt */
3893 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3894 for (i = vsi->base_vector;
3895 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3896 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3899 for (i = 0; i < vsi->num_q_vectors; i++)
3900 synchronize_irq(pf->msix_entries[i + base].vector);
3902 /* Legacy and MSI mode - this stops all interrupt handling */
3903 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3904 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3906 synchronize_irq(pf->pdev->irq);
3911 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3912 * @vsi: the VSI being configured
3914 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3916 struct i40e_pf *pf = vsi->back;
3919 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3920 for (i = 0; i < vsi->num_q_vectors; i++)
3921 i40e_irq_dynamic_enable(vsi, i);
3923 i40e_irq_dynamic_enable_icr0(pf);
3926 i40e_flush(&pf->hw);
3931 * i40e_free_misc_vector - Free the vector that handles non-queue events
3932 * @pf: board private structure
3934 static void i40e_free_misc_vector(struct i40e_pf *pf)
3937 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3938 i40e_flush(&pf->hw);
3940 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3941 synchronize_irq(pf->msix_entries[0].vector);
3942 free_irq(pf->msix_entries[0].vector, pf);
3943 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3948 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3949 * @irq: interrupt number
3950 * @data: pointer to the PF structure
3952 * This is the handler used for all MSI/Legacy interrupts, and deals
3953 * with both queue and non-queue interrupts. This is also used in
3954 * MSIX mode to handle the non-queue interrupts.
3956 static irqreturn_t i40e_intr(int irq, void *data)
3958 struct i40e_pf *pf = (struct i40e_pf *)data;
3959 struct i40e_hw *hw = &pf->hw;
3960 irqreturn_t ret = IRQ_NONE;
3961 u32 icr0, icr0_remaining;
3964 icr0 = rd32(hw, I40E_PFINT_ICR0);
3965 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3967 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3968 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3971 /* if interrupt but no bits showing, must be SWINT */
3972 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3973 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3976 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3977 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3978 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3979 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
3980 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3983 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3984 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3985 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3986 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3988 /* We do not have a way to disarm Queue causes while leaving
3989 * interrupt enabled for all other causes; ideally the
3990 * interrupt should be disabled while we are in NAPI, but
3991 * this is not a performance path and napi_schedule()
3992 * can deal with rescheduling.
3994 if (!test_bit(__I40E_DOWN, pf->state))
3995 napi_schedule_irqoff(&q_vector->napi);
3998 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3999 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4000 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
4001 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
4004 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
4005 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4006 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
4009 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4010 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
4011 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4014 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
4015 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4016 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
4017 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
4018 val = rd32(hw, I40E_GLGEN_RSTAT);
4019 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
4020 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
4021 if (val == I40E_RESET_CORER) {
4023 } else if (val == I40E_RESET_GLOBR) {
4025 } else if (val == I40E_RESET_EMPR) {
4027 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4031 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
4032 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
4033 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
4034 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
4035 rd32(hw, I40E_PFHMC_ERRORINFO),
4036 rd32(hw, I40E_PFHMC_ERRORDATA));
4039 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4040 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4042 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
4043 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4044 i40e_ptp_tx_hwtstamp(pf);
4048 /* If a critical error is pending we have no choice but to reset the device.
4050 * Report and mask out any remaining unexpected interrupts.
4052 icr0_remaining = icr0 & ena_mask;
4053 if (icr0_remaining) {
4054 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4056 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4057 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4058 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4059 dev_info(&pf->pdev->dev, "device will be reset\n");
4060 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4061 i40e_service_event_schedule(pf);
4063 ena_mask &= ~icr0_remaining;
4068 /* re-enable interrupt causes */
4069 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4070 if (!test_bit(__I40E_DOWN, pf->state) ||
4071 test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4072 i40e_service_event_schedule(pf);
4073 i40e_irq_dynamic_enable_icr0(pf);
4080 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4081 * @tx_ring: tx ring to clean
4082 * @budget: how many cleans we're allowed
4084 * Returns true if there's any budget left (i.e. the clean is finished)
4086 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4088 struct i40e_vsi *vsi = tx_ring->vsi;
4089 u16 i = tx_ring->next_to_clean;
4090 struct i40e_tx_buffer *tx_buf;
4091 struct i40e_tx_desc *tx_desc;
4093 tx_buf = &tx_ring->tx_bi[i];
4094 tx_desc = I40E_TX_DESC(tx_ring, i);
4095 i -= tx_ring->count;
4098 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4100 /* if next_to_watch is not set then there is no work pending */
4101 if (!eop_desc)
4102 break;
4104 /* prevent any other reads prior to eop_desc */
4105 smp_rmb();
4107 /* if the descriptor isn't done, no work yet to do */
4108 if (!(eop_desc->cmd_type_offset_bsz &
4109 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4110 break;
4112 /* clear next_to_watch to prevent false hangs */
4113 tx_buf->next_to_watch = NULL;
4115 tx_desc->buffer_addr = 0;
4116 tx_desc->cmd_type_offset_bsz = 0;
4117 /* move past filter desc */
4118 tx_buf++;
4119 tx_desc++;
4120 i++;
4121 if (unlikely(!i)) {
4122 i -= tx_ring->count;
4123 tx_buf = tx_ring->tx_bi;
4124 tx_desc = I40E_TX_DESC(tx_ring, 0);
4126 /* unmap skb header data */
4127 dma_unmap_single(tx_ring->dev,
4128 dma_unmap_addr(tx_buf, dma),
4129 dma_unmap_len(tx_buf, len),
4130 DMA_TO_DEVICE);
4131 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4132 kfree(tx_buf->raw_buf);
4134 tx_buf->raw_buf = NULL;
4135 tx_buf->tx_flags = 0;
4136 tx_buf->next_to_watch = NULL;
4137 dma_unmap_len_set(tx_buf, len, 0);
4138 tx_desc->buffer_addr = 0;
4139 tx_desc->cmd_type_offset_bsz = 0;
4141 /* move us past the eop_desc for start of next FD desc */
4142 tx_buf++;
4143 tx_desc++;
4144 i++;
4145 if (unlikely(!i)) {
4146 i -= tx_ring->count;
4147 tx_buf = tx_ring->tx_bi;
4148 tx_desc = I40E_TX_DESC(tx_ring, 0);
4151 /* update budget accounting */
4152 budget--;
4153 } while (likely(budget));
4155 i += tx_ring->count;
4156 tx_ring->next_to_clean = i;
4158 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4159 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4161 return budget > 0;
4165 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4166 * @irq: interrupt number
4167 * @data: pointer to a q_vector
4169 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4171 struct i40e_q_vector *q_vector = data;
4172 struct i40e_vsi *vsi;
4174 if (!q_vector->tx.ring)
4175 return IRQ_HANDLED;
4177 vsi = q_vector->tx.ring->vsi;
4178 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4180 return IRQ_HANDLED;
4184 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4185 * @vsi: the VSI being configured
4186 * @v_idx: vector index
4187 * @qp_idx: queue pair index
4189 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4191 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4192 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4193 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4195 tx_ring->q_vector = q_vector;
4196 tx_ring->next = q_vector->tx.ring;
4197 q_vector->tx.ring = tx_ring;
4198 q_vector->tx.count++;
4200 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4201 if (i40e_enabled_xdp_vsi(vsi)) {
4202 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4204 xdp_ring->q_vector = q_vector;
4205 xdp_ring->next = q_vector->tx.ring;
4206 q_vector->tx.ring = xdp_ring;
4207 q_vector->tx.count++;
4210 rx_ring->q_vector = q_vector;
4211 rx_ring->next = q_vector->rx.ring;
4212 q_vector->rx.ring = rx_ring;
4213 q_vector->rx.count++;
4217 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4218 * @vsi: the VSI being configured
4220 * This function maps descriptor rings to the queue-specific vectors
4221 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4222 * one vector per queue pair, but on a constrained vector budget, we
4223 * group the queue pairs as "efficiently" as possible.
4225 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4227 int qp_remaining = vsi->num_queue_pairs;
4228 int q_vectors = vsi->num_q_vectors;
4229 int num_ringpairs;
4230 int v_start = 0;
4231 int qp_idx = 0;
4233 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4234 * group them so there are multiple queues per vector.
4235 * It is also important to go through all the vectors available to be
4236 * sure that if we don't use all the vectors, the remaining vectors
4237 * are cleared. This is especially important when decreasing the
4238 * number of queues in use.
4240 for (; v_start < q_vectors; v_start++) {
4241 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4243 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4245 q_vector->num_ringpairs = num_ringpairs;
4246 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4248 q_vector->rx.count = 0;
4249 q_vector->tx.count = 0;
4250 q_vector->rx.ring = NULL;
4251 q_vector->tx.ring = NULL;
4253 while (num_ringpairs--) {
4254 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4255 qp_idx++;
4256 qp_remaining--;
4257 }
4262 * i40e_vsi_request_irq - Request IRQ from the OS
4263 * @vsi: the VSI being configured
4264 * @basename: name for the vector
4266 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4268 struct i40e_pf *pf = vsi->back;
4271 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4272 err = i40e_vsi_request_irq_msix(vsi, basename);
4273 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4274 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4275 pf->int_name, pf);
4276 else
4277 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4278 pf->int_name, pf);
4280 if (err)
4281 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4283 return err;
4286 #ifdef CONFIG_NET_POLL_CONTROLLER
4288 * i40e_netpoll - A Polling 'interrupt' handler
4289 * @netdev: network interface device structure
4291 * This is used by netconsole to send skbs without having to re-enable
4292 * interrupts. It's not called while the normal interrupt routine is executing.
4294 static void i40e_netpoll(struct net_device *netdev)
4296 struct i40e_netdev_priv *np = netdev_priv(netdev);
4297 struct i40e_vsi *vsi = np->vsi;
4298 struct i40e_pf *pf = vsi->back;
4301 /* if interface is down do nothing */
4302 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4303 return;
4305 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4306 for (i = 0; i < vsi->num_q_vectors; i++)
4307 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4309 i40e_intr(pf->pdev->irq, netdev);
4314 #define I40E_QTX_ENA_WAIT_COUNT 50
4317 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4318 * @pf: the PF being configured
4319 * @pf_q: the PF queue
4320 * @enable: enable or disable state of the queue
4322 * This routine will wait for the given Tx queue of the PF to reach the
4323 * enabled or disabled state.
4324 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4325 * multiple retries; else will return 0 in case of success.
4327 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4332 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4333 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4334 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4335 break;
4337 usleep_range(10, 20);
4339 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4340 return -ETIMEDOUT;
4342 return 0;
4346 * i40e_control_tx_q - Start or stop a particular Tx queue
4347 * @pf: the PF structure
4348 * @pf_q: the PF queue to configure
4349 * @enable: start or stop the queue
4351 * This function enables or disables a single queue. Note that any delay
4352 * required after the operation is expected to be handled by the caller of
4353 * this function.
4355 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4357 struct i40e_hw *hw = &pf->hw;
4361 /* warn the TX unit of coming changes */
4362 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4364 usleep_range(10, 20);
4366 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4367 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4368 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4369 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4370 break;
4371 usleep_range(1000, 2000);
4374 /* Skip if the queue is already in the requested state */
4375 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4376 return;
4378 /* turn on/off the queue */
4379 if (enable) {
4380 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4381 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4382 } else {
4383 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4384 }
4386 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4390 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4392 * @pf: the PF structure
4393 * @pf_q: the PF queue to configure
4394 * @is_xdp: true if the queue is used for XDP
4395 * @enable: start or stop the queue
4397 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4398 bool is_xdp, bool enable)
4402 i40e_control_tx_q(pf, pf_q, enable);
4404 /* wait for the change to finish */
4405 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4406 if (ret) {
4407 dev_info(&pf->pdev->dev,
4408 "VSI seid %d %sTx ring %d %sable timeout\n",
4409 seid, (is_xdp ? "XDP " : ""), pf_q,
4410 (enable ? "en" : "dis"));
4411 }
4413 return ret;
4417 * i40e_vsi_control_tx - Start or stop a VSI's rings
4418 * @vsi: the VSI being configured
4419 * @enable: start or stop the rings
4421 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4423 struct i40e_pf *pf = vsi->back;
4424 int i, pf_q, ret = 0;
4426 pf_q = vsi->base_queue;
4427 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4428 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4429 pf_q,
4430 false /*is xdp*/, enable);
4431 if (ret)
4432 break;
4434 if (!i40e_enabled_xdp_vsi(vsi))
4437 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4438 pf_q + vsi->alloc_queue_pairs,
4439 true /*is xdp*/, enable);
4440 if (ret)
4441 break;
4444 return ret;
4447 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4448 * @pf: the PF being configured
4449 * @pf_q: the PF queue
4450 * @enable: enable or disable state of the queue
4452 * This routine will wait for the given Rx queue of the PF to reach the
4453 * enabled or disabled state.
4454 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4455 * multiple retries; else will return 0 in case of success.
4457 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4462 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4463 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4464 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4465 break;
4467 usleep_range(10, 20);
4469 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4470 return -ETIMEDOUT;
4472 return 0;
4476 * i40e_control_rx_q - Start or stop a particular Rx queue
4477 * @pf: the PF structure
4478 * @pf_q: the PF queue to configure
4479 * @enable: start or stop the queue
4481 * This function enables or disables a single queue. Note that
4482 * any delay required after the operation is expected to be
4483 * handled by the caller of this function.
4485 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4487 struct i40e_hw *hw = &pf->hw;
4491 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4492 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4493 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4494 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4495 break;
4496 usleep_range(1000, 2000);
4499 /* Skip if the queue is already in the requested state */
4500 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4501 return;
4503 /* turn on/off the queue */
4504 if (enable)
4505 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4506 else
4507 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4509 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4513 * i40e_control_wait_rx_q
4514 * @pf: the PF structure
4515 * @pf_q: queue being configured
4516 * @enable: start or stop the rings
4518 * This function enables or disables a single queue along with waiting
4519 * for the change to finish. The caller of this function should handle
4520 * the delays needed in the case of disabling queues.
4522 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4526 i40e_control_rx_q(pf, pf_q, enable);
4528 /* wait for the change to finish */
4529 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4531 return ret;
4537 * i40e_vsi_control_rx - Start or stop a VSI's rings
4538 * @vsi: the VSI being configured
4539 * @enable: start or stop the rings
4541 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4543 struct i40e_pf *pf = vsi->back;
4544 int i, pf_q, ret = 0;
4546 pf_q = vsi->base_queue;
4547 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4548 ret = i40e_control_wait_rx_q(pf, pf_q, enable);
4550 dev_info(&pf->pdev->dev,
4551 "VSI seid %d Rx ring %d %sable timeout\n",
4552 vsi->seid, pf_q, (enable ? "en" : "dis"));
4553 break;
4554 }
4557 /* Due to HW errata, on Rx disable only, the register can indicate done
4558 * before it really is. Needs 50ms to be sure
4559 */
4560 if (!enable)
4561 mdelay(50);
4563 return ret;
4567 * i40e_vsi_start_rings - Start a VSI's rings
4568 * @vsi: the VSI being configured
4570 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4574 /* do rx first for enable and last for disable */
4575 ret = i40e_vsi_control_rx(vsi, true);
4576 if (ret)
4577 return ret;
4578 ret = i40e_vsi_control_tx(vsi, true);
4580 return ret;
4584 * i40e_vsi_stop_rings - Stop a VSI's rings
4585 * @vsi: the VSI being configured
4587 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4589 /* When port TX is suspended, don't wait */
4590 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4591 return i40e_vsi_stop_rings_no_wait(vsi);
4593 /* do rx first for enable and last for disable
4594 * Ignore return value; we need to shut down whatever we can
4595 */
4596 i40e_vsi_control_tx(vsi, false);
4597 i40e_vsi_control_rx(vsi, false);
4601 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4602 * @vsi: the VSI being shutdown
4604 * This function stops all the rings for a VSI but does not delay to verify
4605 * that rings have been disabled. It is expected that the caller is shutting
4606 * down multiple VSIs at once and will delay together for all the VSIs after
4607 * initiating the shutdown. This is particularly useful for shutting down lots
4608 * of VFs together. Otherwise, a large delay can be incurred while configuring
4609 * each VSI in serial.
4611 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4613 struct i40e_pf *pf = vsi->back;
4616 pf_q = vsi->base_queue;
4617 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4618 i40e_control_tx_q(pf, pf_q, false);
4619 i40e_control_rx_q(pf, pf_q, false);
4624 * i40e_vsi_free_irq - Free the irq association with the OS
4625 * @vsi: the VSI being configured
4627 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4629 struct i40e_pf *pf = vsi->back;
4630 struct i40e_hw *hw = &pf->hw;
4631 int base = vsi->base_vector;
4635 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4636 if (!vsi->q_vectors)
4639 if (!vsi->irqs_ready)
4642 vsi->irqs_ready = false;
4643 for (i = 0; i < vsi->num_q_vectors; i++) {
4648 irq_num = pf->msix_entries[vector].vector;
4650 /* free only the irqs that were actually requested */
4651 if (!vsi->q_vectors[i] ||
4652 !vsi->q_vectors[i]->num_ringpairs)
4655 /* clear the affinity notifier in the IRQ descriptor */
4656 irq_set_affinity_notifier(irq_num, NULL);
4657 /* remove our suggested affinity mask for this IRQ */
4658 irq_set_affinity_hint(irq_num, NULL);
4659 synchronize_irq(irq_num);
4660 free_irq(irq_num, vsi->q_vectors[i]);
4662 /* Tear down the interrupt queue link list
4664 * We know that they come in pairs and always
4665 * the Rx first, then the Tx. To clear the
4666 * link list, stick the EOL value into the
4667 * next_q field of the registers.
4669 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4670 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4671 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4672 val |= I40E_QUEUE_END_OF_LIST
4673 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4674 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4676 while (qp != I40E_QUEUE_END_OF_LIST) {
4679 val = rd32(hw, I40E_QINT_RQCTL(qp));
4681 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4682 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4683 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4684 I40E_QINT_RQCTL_INTEVENT_MASK);
4686 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4687 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4689 wr32(hw, I40E_QINT_RQCTL(qp), val);
4691 val = rd32(hw, I40E_QINT_TQCTL(qp));
4693 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4694 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4696 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4697 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4698 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4699 I40E_QINT_TQCTL_INTEVENT_MASK);
4701 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4702 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4704 wr32(hw, I40E_QINT_TQCTL(qp), val);
4709 free_irq(pf->pdev->irq, pf);
4711 val = rd32(hw, I40E_PFINT_LNKLST0);
4712 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4713 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4714 val |= I40E_QUEUE_END_OF_LIST
4715 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4716 wr32(hw, I40E_PFINT_LNKLST0, val);
4718 val = rd32(hw, I40E_QINT_RQCTL(qp));
4719 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4720 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4721 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4722 I40E_QINT_RQCTL_INTEVENT_MASK);
4724 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4725 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4727 wr32(hw, I40E_QINT_RQCTL(qp), val);
4729 val = rd32(hw, I40E_QINT_TQCTL(qp));
4731 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4732 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4733 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4734 I40E_QINT_TQCTL_INTEVENT_MASK);
4736 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4737 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4739 wr32(hw, I40E_QINT_TQCTL(qp), val);
4744 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4745 * @vsi: the VSI being configured
4746 * @v_idx: Index of vector to be freed
4748 * This function frees the memory allocated to the q_vector. In addition if
4749 * NAPI is enabled it will delete any references to the NAPI struct prior
4750 * to freeing the q_vector.
4752 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4754 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4755 struct i40e_ring *ring;
4760 /* disassociate q_vector from rings */
4761 i40e_for_each_ring(ring, q_vector->tx)
4762 ring->q_vector = NULL;
4764 i40e_for_each_ring(ring, q_vector->rx)
4765 ring->q_vector = NULL;
4767 /* only VSI w/ an associated netdev is set up w/ NAPI */
4769 netif_napi_del(&q_vector->napi);
4771 vsi->q_vectors[v_idx] = NULL;
4773 kfree_rcu(q_vector, rcu);
4777 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4778 * @vsi: the VSI being un-configured
4780 * This frees the memory allocated to the q_vectors and
4781 * deletes references to the NAPI struct.
4783 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4787 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4788 i40e_free_q_vector(vsi, v_idx);
4792 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4793 * @pf: board private structure
4795 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4797 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4798 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4799 pci_disable_msix(pf->pdev);
4800 kfree(pf->msix_entries);
4801 pf->msix_entries = NULL;
4802 kfree(pf->irq_pile);
4803 pf->irq_pile = NULL;
4804 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4805 pci_disable_msi(pf->pdev);
4807 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4811 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4812 * @pf: board private structure
4814 * We go through and clear interrupt specific resources and reset the structure
4815 * to pre-load conditions
4817 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4820 if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
4821 i40e_free_misc_vector(pf);
4823 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4824 I40E_IWARP_IRQ_PILE_ID);
4826 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4827 for (i = 0; i < pf->num_alloc_vsi; i++)
4828 if (pf->vsi[i])
4829 i40e_vsi_free_q_vectors(pf->vsi[i]);
4830 i40e_reset_interrupt_capability(pf);
4834 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4835 * @vsi: the VSI being configured
4837 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4839 int q_idx;
4841 if (!vsi->netdev)
4842 return;
4844 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4845 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4847 if (q_vector->rx.ring || q_vector->tx.ring)
4848 napi_enable(&q_vector->napi);
4853 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4854 * @vsi: the VSI being configured
4856 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4858 int q_idx;
4860 if (!vsi->netdev)
4861 return;
4863 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4864 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4866 if (q_vector->rx.ring || q_vector->tx.ring)
4867 napi_disable(&q_vector->napi);
4872 * i40e_vsi_close - Shut down a VSI
4873 * @vsi: the vsi to be quelled
4875 static void i40e_vsi_close(struct i40e_vsi *vsi)
4877 struct i40e_pf *pf = vsi->back;
4878 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
4879 i40e_down(vsi);
4880 i40e_vsi_free_irq(vsi);
4881 i40e_vsi_free_tx_resources(vsi);
4882 i40e_vsi_free_rx_resources(vsi);
4883 vsi->current_netdev_flags = 0;
4884 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
4885 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4886 set_bit(__I40E_CLIENT_RESET, pf->state);
4890 * i40e_quiesce_vsi - Pause a given VSI
4891 * @vsi: the VSI being paused
4893 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4895 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4896 return;
4898 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
4899 if (vsi->netdev && netif_running(vsi->netdev))
4900 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4902 i40e_vsi_close(vsi);
4906 * i40e_unquiesce_vsi - Resume a given VSI
4907 * @vsi: the VSI being resumed
4909 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4911 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
4912 return;
4914 if (vsi->netdev && netif_running(vsi->netdev))
4915 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4917 i40e_vsi_open(vsi); /* this clears the DOWN bit */
4921 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4924 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4928 for (v = 0; v < pf->num_alloc_vsi; v++) {
4929 if (pf->vsi[v])
4930 i40e_quiesce_vsi(pf->vsi[v]);
4935 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4938 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4942 for (v = 0; v < pf->num_alloc_vsi; v++) {
4943 if (pf->vsi[v])
4944 i40e_unquiesce_vsi(pf->vsi[v]);
4949 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4950 * @vsi: the VSI being configured
4952 * Wait until all queues on a given VSI have been disabled.
4954 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4956 struct i40e_pf *pf = vsi->back;
4959 pf_q = vsi->base_queue;
4960 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4961 /* Check and wait for the Tx queue */
4962 ret = i40e_pf_txq_wait(pf, pf_q, false);
4964 dev_info(&pf->pdev->dev,
4965 "VSI seid %d Tx ring %d disable timeout\n",
4966 vsi->seid, pf_q);
4967 return ret;
4970 if (!i40e_enabled_xdp_vsi(vsi))
4973 /* Check and wait for the XDP Tx queue */
4974 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
4975 false);
4977 dev_info(&pf->pdev->dev,
4978 "VSI seid %d XDP Tx ring %d disable timeout\n",
4979 vsi->seid, pf_q);
4980 return ret;
4983 /* Check and wait for the Rx queue */
4984 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4986 dev_info(&pf->pdev->dev,
4987 "VSI seid %d Rx ring %d disable timeout\n",
4988 vsi->seid, pf_q);
4989 return ret;
4993 return 0;
4996 #ifdef CONFIG_I40E_DCB
4998 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
5001 * This function waits for the queues to be in disabled state for all the
5002 * VSIs that are managed by this PF.
5004 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
5008 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
5009 if (pf->vsi[v]) {
5010 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
5011 if (ret)
5012 break;
5013 }
5022 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
5023 * @pf: pointer to PF
5025 * Get the TC map for an iSCSI-enabled PF; the map will include the iSCSI TC
5028 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
5030 struct i40e_dcb_app_priority_table app;
5031 struct i40e_hw *hw = &pf->hw;
5032 u8 enabled_tc = 1; /* TC0 is always enabled */
5033 u8 tc, i;
5034 /* Get the iSCSI APP TLV */
5035 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5037 for (i = 0; i < dcbcfg->numapps; i++) {
5038 app = dcbcfg->app[i];
5039 if (app.selector == I40E_APP_SEL_TCPIP &&
5040 app.protocolid == I40E_APP_PROTOID_ISCSI) {
5041 tc = dcbcfg->etscfg.prioritytable[app.priority];
5042 enabled_tc |= BIT(tc);
5043 break;
5047 return enabled_tc;
5051 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
5052 * @dcbcfg: the corresponding DCBx configuration structure
5054 * Return the number of TCs from given DCBx configuration
5056 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5058 int i, tc_unused = 0;
5059 u8 num_tc = 0;
5060 u8 ret = 0;
5062 /* Scan the ETS Config Priority Table to find
5063 * traffic class enabled for a given priority
5064 * and create a bitmask of enabled TCs
5066 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5067 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
5069 /* Now scan the bitmask to check for
5070 * contiguous TCs starting with TC0
5072 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5073 if (num_tc & BIT(i)) {
5074 if (!tc_unused) {
5075 ret++;
5076 } else {
5077 pr_err("Non-contiguous TC - Disabling DCB\n");
5078 return 1;
5079 }
5080 } else {
5081 tc_unused = 1;
5082 }
5083 }
5085 /* There is always at least TC0 */
5086 if (!ret)
5087 ret = 1;
5089 return ret;
5093 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5094 * @dcbcfg: the corresponding DCBx configuration structure
5096 * Query the current DCB configuration and return a bitmap of the
5097 * traffic classes enabled in the given DCBX config
5099 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5101 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5102 u8 enabled_tc = 1;
5103 u8 i;
5105 for (i = 0; i < num_tc; i++)
5106 enabled_tc |= BIT(i);
5108 return enabled_tc;
5112 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5113 * @pf: PF being queried
5115 * Query the current MQPRIO configuration and return a bitmap of the
5116 * traffic classes enabled.
5118 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5120 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5121 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5122 u8 enabled_tc = 1, i;
5124 for (i = 1; i < num_tc; i++)
5125 enabled_tc |= BIT(i);
5127 return enabled_tc;
5130 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5131 * @pf: PF being queried
5133 * Return number of traffic classes enabled for the given PF
5135 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5137 struct i40e_hw *hw = &pf->hw;
5138 u8 i, enabled_tc = 1;
5139 u8 num_tc = 0;
5140 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5142 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5143 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5145 /* If neither MQPRIO nor DCB is enabled, then always use single TC */
5146 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5149 /* SFP mode will be enabled for all TCs on port */
5150 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5151 return i40e_dcb_get_num_tc(dcbcfg);
5153 /* MFP mode: return count of enabled TCs for this PF */
5154 if (pf->hw.func_caps.iscsi)
5155 enabled_tc = i40e_get_iscsi_tc_map(pf);
5156 else
5157 return 1; /* Only TC0 */
5159 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5160 if (enabled_tc & BIT(i))
5161 num_tc++;
5162 }
5163 return num_tc;
5167 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
5168 * @pf: PF being queried
5170 * Return a bitmap for enabled traffic classes for this PF.
5172 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5174 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5175 return i40e_mqprio_get_enabled_tc(pf);
5177 /* If neither MQPRIO nor DCB is enabled for this PF then just return
5180 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5181 return I40E_DEFAULT_TRAFFIC_CLASS;
5183 /* SFP mode we want PF to be enabled for all TCs */
5184 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5185 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5187 /* MFP enabled and iSCSI PF type */
5188 if (pf->hw.func_caps.iscsi)
5189 return i40e_get_iscsi_tc_map(pf);
5191 return I40E_DEFAULT_TRAFFIC_CLASS;
5195 * i40e_vsi_get_bw_info - Query VSI BW Information
5196 * @vsi: the VSI being queried
5198 * Returns 0 on success, negative value on failure
5200 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5202 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5203 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5204 struct i40e_pf *pf = vsi->back;
5205 struct i40e_hw *hw = &pf->hw;
5210 /* Get the VSI level BW configuration */
5211 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5213 dev_info(&pf->pdev->dev,
5214 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5215 i40e_stat_str(&pf->hw, ret),
5216 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5220 /* Get the VSI level BW configuration per TC */
5221 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5224 dev_info(&pf->pdev->dev,
5225 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5226 i40e_stat_str(&pf->hw, ret),
5227 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5231 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5232 dev_info(&pf->pdev->dev,
5233 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5234 bw_config.tc_valid_bits,
5235 bw_ets_config.tc_valid_bits);
5236 /* Still continuing */
5239 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5240 vsi->bw_max_quanta = bw_config.max_bw;
5241 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5242 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5243 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5244 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5245 vsi->bw_ets_limit_credits[i] =
5246 le16_to_cpu(bw_ets_config.credits[i]);
5247 /* 3 bits out of 4 for each TC */
5248 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5255 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5256 * @vsi: the VSI being configured
5257 * @enabled_tc: TC bitmap
5258 * @bw_share: BW shared credits per TC
5260 * Returns 0 on success, negative value on failure
5262 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5265 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5266 struct i40e_pf *pf = vsi->back;
5270 /* There is no need to reset BW when mqprio mode is on. */
5271 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5272 return 0;
5273 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5274 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5276 dev_info(&pf->pdev->dev,
5277 "Failed to reset tx rate for vsi->seid %u\n",
5278 vsi->seid);
5279 return ret;
5281 bw_data.tc_valid_bits = enabled_tc;
5282 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5283 bw_data.tc_bw_credits[i] = bw_share[i];
5285 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5287 dev_info(&pf->pdev->dev,
5288 "AQ command Config VSI BW allocation per TC failed = %d\n",
5289 pf->hw.aq.asq_last_status);
5290 return -EINVAL;
5293 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5294 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5296 return 0;
5300 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5301 * @vsi: the VSI being configured
5302 * @enabled_tc: TC map to be enabled
5305 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5307 struct net_device *netdev = vsi->netdev;
5308 struct i40e_pf *pf = vsi->back;
5309 struct i40e_hw *hw = &pf->hw;
5312 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5318 netdev_reset_tc(netdev);
5322 /* Set up actual enabled TCs on the VSI */
5323 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5326 /* set per TC queues for the VSI */
5327 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5328 /* Only set TC queues for enabled tcs
5330 * e.g. For a VSI that has TC0 and TC3 enabled the
5331 * enabled_tc bitmap would be 0x00001001; the driver
5332 * will set the numtc for netdev as 2 that will be
5333 * referenced by the netdev layer as TC 0 and 1.
5335 if (vsi->tc_config.enabled_tc & BIT(i))
5336 netdev_set_tc_queue(netdev,
5337 vsi->tc_config.tc_info[i].netdev_tc,
5338 vsi->tc_config.tc_info[i].qcount,
5339 vsi->tc_config.tc_info[i].qoffset);
5342 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5345 /* Assign UP2TC map for the VSI */
5346 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5347 /* Get the actual TC# for the UP */
5348 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5349 /* Get the mapped netdev TC# for the UP */
5350 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5351 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5356 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
5357 * @vsi: the VSI being configured
5358 * @ctxt: the ctxt buffer returned from AQ VSI update param command
5360 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5361 struct i40e_vsi_context *ctxt)
5363 /* copy just the sections touched, not the entire info,
5364 * since not all sections are valid as returned by
5365 * update vsi params
5366 */
5367 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5368 memcpy(&vsi->info.queue_mapping,
5369 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5370 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5371 sizeof(vsi->info.tc_mapping));
5375 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5376 * @vsi: VSI to be configured
5377 * @enabled_tc: TC bitmap
5379 * This configures a particular VSI for TCs that are mapped to the
5380 * given TC bitmap. It uses default bandwidth share for TCs across
5381 * VSIs to configure TC for a particular VSI.
5384 * It is expected that the VSI queues have been quiesced before calling
5385 * this function.
5387 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5389 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5390 struct i40e_pf *pf = vsi->back;
5391 struct i40e_hw *hw = &pf->hw;
5392 struct i40e_vsi_context ctxt;
5396 /* Check if enabled_tc is same as existing or new TCs */
5397 if (vsi->tc_config.enabled_tc == enabled_tc &&
5398 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5401 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5402 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5403 if (enabled_tc & BIT(i))
5407 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5409 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5411 dev_info(&pf->pdev->dev,
5412 "Failed configuring TC map %d for VSI %d\n",
5413 enabled_tc, vsi->seid);
5414 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5417 dev_info(&pf->pdev->dev,
5418 "Failed querying vsi bw info, err %s aq_err %s\n",
5419 i40e_stat_str(hw, ret),
5420 i40e_aq_str(hw, hw->aq.asq_last_status));
5423 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5424 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5427 valid_tc = bw_config.tc_valid_bits;
5428 /* Always enable TC0, no matter what */
5430 dev_info(&pf->pdev->dev,
5431 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5432 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5433 enabled_tc = valid_tc;
5436 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5438 dev_err(&pf->pdev->dev,
5439 "Unable to configure TC map %d for VSI %d\n",
5440 enabled_tc, vsi->seid);
5445 /* Update Queue Pairs Mapping for currently enabled UPs */
5446 ctxt.seid = vsi->seid;
5447 ctxt.pf_num = vsi->back->hw.pf_id;
5449 ctxt.uplink_seid = vsi->uplink_seid;
5450 ctxt.info = vsi->info;
5451 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5452 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5456 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5459 /* On destroying the qdisc, reset vsi->rss_size, as number of enabled
5462 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5463 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5464 vsi->num_queue_pairs);
5465 ret = i40e_vsi_config_rss(vsi);
5467 dev_info(&vsi->back->pdev->dev,
5468 "Failed to reconfig rss for num_queues\n");
5471 vsi->reconfig_rss = false;
5473 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5474 ctxt.info.valid_sections |=
5475 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5476 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5479 /* Update the VSI after updating the VSI queue-mapping
5482 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5484 dev_info(&pf->pdev->dev,
5485 "Update vsi tc config failed, err %s aq_err %s\n",
5486 i40e_stat_str(hw, ret),
5487 i40e_aq_str(hw, hw->aq.asq_last_status));
5490 /* update the local VSI info with updated queue map */
5491 i40e_vsi_update_queue_map(vsi, &ctxt);
5492 vsi->info.valid_sections = 0;
5494 /* Update current VSI BW information */
5495 ret = i40e_vsi_get_bw_info(vsi);
5497 dev_info(&pf->pdev->dev,
5498 "Failed updating vsi bw info, err %s aq_err %s\n",
5499 i40e_stat_str(hw, ret),
5500 i40e_aq_str(hw, hw->aq.asq_last_status));
5504 /* Update the netdev TC setup */
5505 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5511 * i40e_get_link_speed - Returns link speed for the interface
5512 * @vsi: VSI to be configured
5515 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5517 struct i40e_pf *pf = vsi->back;
5519 switch (pf->hw.phy.link_info.link_speed) {
5520 case I40E_LINK_SPEED_40GB:
5521 return 40000;
5522 case I40E_LINK_SPEED_25GB:
5523 return 25000;
5524 case I40E_LINK_SPEED_20GB:
5525 return 20000;
5526 case I40E_LINK_SPEED_10GB:
5527 return 10000;
5528 case I40E_LINK_SPEED_1GB:
5529 return 1000;
5530 default:
5531 return -EINVAL;
5532 }
5536 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5537 * @vsi: VSI to be configured
5538 * @seid: seid of the channel/VSI
5539 * @max_tx_rate: max TX rate to be configured as BW limit
5541 * Helper function to set BW limit for a given VSI
5543 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5545 struct i40e_pf *pf = vsi->back;
5550 speed = i40e_get_link_speed(vsi);
5551 if (max_tx_rate > speed) {
5552 dev_err(&pf->pdev->dev,
5553 "Invalid max tx rate %llu specified for VSI seid %d.",
5554 max_tx_rate, vsi->seid);
5555 return -EINVAL;
5556 }
5557 if (max_tx_rate && max_tx_rate < 50) {
5558 dev_warn(&pf->pdev->dev,
5559 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5560 max_tx_rate = 50;
5561 }
5563 /* Tx rate credits are in values of 50Mbps, 0 is disabled */
5564 credits = max_tx_rate;
5565 do_div(credits, I40E_BW_CREDIT_DIVISOR);
5566 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5567 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5569 dev_err(&pf->pdev->dev,
5570 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5571 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5572 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5574 return ret;
5577 * i40e_remove_queue_channels - Remove queue channels for the TCs
5578 * @vsi: VSI to be configured
5580 * Remove queue channels for the TCs
5582 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5584 enum i40e_admin_queue_err last_aq_status;
5585 struct i40e_cloud_filter *cfilter;
5586 struct i40e_channel *ch, *ch_tmp;
5587 struct i40e_pf *pf = vsi->back;
5588 struct hlist_node *node;
5591 /* Reset rss size that was stored when reconfiguring rss for
5592 * channel VSIs with non-power-of-2 queue count.
5594 vsi->current_rss_size = 0;
5596 /* perform cleanup for channels if they exist */
5597 if (list_empty(&vsi->ch_list))
5600 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5601 struct i40e_vsi *p_vsi;
5603 list_del(&ch->list);
5604 p_vsi = ch->parent_vsi;
5605 if (!p_vsi || !ch->initialized) {
5609 /* Reset queue contexts */
5610 for (i = 0; i < ch->num_queue_pairs; i++) {
5611 struct i40e_ring *tx_ring, *rx_ring;
5614 pf_q = ch->base_queue + i;
5615 tx_ring = vsi->tx_rings[pf_q];
5618 rx_ring = vsi->rx_rings[pf_q];
5622 /* Reset BW configured for this VSI via mqprio */
5623 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5625 dev_info(&vsi->back->pdev->dev,
5626 "Failed to reset tx rate for ch->seid %u\n",
5629 /* delete cloud filters associated with this channel */
5630 hlist_for_each_entry_safe(cfilter, node,
5631 &pf->cloud_filter_list, cloud_node) {
5632 if (cfilter->seid != ch->seid)
5635 hash_del(&cfilter->cloud_node);
5636 if (cfilter->dst_port)
5637 ret = i40e_add_del_cloud_filter_big_buf(vsi,
5641 ret = i40e_add_del_cloud_filter(vsi, cfilter,
5643 last_aq_status = pf->hw.aq.asq_last_status;
5645 dev_info(&pf->pdev->dev,
5646 "Failed to delete cloud filter, err %s aq_err %s\n",
5647 i40e_stat_str(&pf->hw, ret),
5648 i40e_aq_str(&pf->hw, last_aq_status));
5652 /* delete VSI from FW */
5653 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5656 dev_err(&vsi->back->pdev->dev,
5657 "unable to remove channel (%d) for parent VSI(%d)\n",
5658 ch->seid, p_vsi->seid);
5661 INIT_LIST_HEAD(&vsi->ch_list);
5665 * i40e_is_any_channel - check if any channel exists
5666 * @vsi: ptr to VSI to which channels are associated with
5668 * Returns true if at least one channel is associated with the given VSI,
5669 * false otherwise
5670 static bool i40e_is_any_channel(struct i40e_vsi *vsi)
5672 struct i40e_channel *ch, *ch_tmp;
5674 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5675 if (ch->initialized)
5676 return true;
5677 }
5679 return false;
5683 * i40e_get_max_queues_for_channel
5684 * @vsi: ptr to VSI to which channels are associated with
5686 * Helper function which returns max value among the queue counts set on the
5687 * channels/TCs created.
5689 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5691 struct i40e_channel *ch, *ch_tmp;
5694 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5695 if (!ch->initialized)
5696 continue;
5697 if (ch->num_queue_pairs > max)
5698 max = ch->num_queue_pairs;
5701 return max;
5705 * i40e_validate_num_queues - validate num_queues w.r.t. channels
5706 * @pf: ptr to PF device
5707 * @num_queues: number of queues
5708 * @vsi: the parent VSI
5709 * @reconfig_rss: indicates whether RSS should be reconfigured or not
5711 * This function validates number of queues in the context of new channel
5712 * which is being established and determines if RSS should be reconfigured
5713 * or not for parent VSI.
5715 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5716 struct i40e_vsi *vsi, bool *reconfig_rss)
5723 *reconfig_rss = false;
5724 if (vsi->current_rss_size) {
5725 if (num_queues > vsi->current_rss_size) {
5726 dev_dbg(&pf->pdev->dev,
5727 "Error: num_queues (%d) > vsi's current_size(%d)\n",
5728 num_queues, vsi->current_rss_size);
5730 } else if ((num_queues < vsi->current_rss_size) &&
5731 (!is_power_of_2(num_queues))) {
5732 dev_dbg(&pf->pdev->dev,
5733 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5734 num_queues, vsi->current_rss_size);
5739 if (!is_power_of_2(num_queues)) {
5740 /* Find the max num_queues configured for any existing channel;
5741 * if channels exist, enforce 'num_queues' to be at least the
5742 * max queue count ever configured for a channel.
5743 */
5745 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5746 if (num_queues < max_ch_queues) {
5747 dev_dbg(&pf->pdev->dev,
5748 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
5749 num_queues, max_ch_queues);
5752 *reconfig_rss = true;
5755 return 0;
5759 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
5760 * @vsi: the VSI being setup
5761 * @rss_size: size of RSS; the LUT is reprogrammed accordingly
5763 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
5765 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5767 struct i40e_pf *pf = vsi->back;
5768 u8 seed[I40E_HKEY_ARRAY_SIZE];
5769 struct i40e_hw *hw = &pf->hw;
5777 if (rss_size > vsi->rss_size)
5778 return -EINVAL;
5780 local_rss_size = min_t(int, vsi->rss_size, rss_size);
5781 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5782 if (!lut)
5783 return -ENOMEM;
5785 /* Ignoring user configured lut if there is one */
5786 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5788 /* Use user configured hash key if there is one, otherwise
5791 if (vsi->rss_hkey_user)
5792 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5794 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
5796 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5798 dev_info(&pf->pdev->dev,
5799 "Cannot set RSS lut, err %s aq_err %s\n",
5800 i40e_stat_str(hw, ret),
5801 i40e_aq_str(hw, hw->aq.asq_last_status));
5802 kfree(lut);
5803 return ret;
5804 }
5805 kfree(lut);
5807 /* Do the update w.r.t. storing rss_size */
5808 if (!vsi->orig_rss_size)
5809 vsi->orig_rss_size = vsi->rss_size;
5810 vsi->current_rss_size = local_rss_size;
5812 return 0;
5816 * i40e_channel_setup_queue_map - Setup a channel queue map
5817 * @pf: ptr to PF device
5818 * @vsi: the VSI being setup
5819 * @ctxt: VSI context structure
5820 * @ch: ptr to channel structure
5822 * Setup queue map for a specific channel
5824 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5825 struct i40e_vsi_context *ctxt,
5826 struct i40e_channel *ch)
5828 u16 qcount, qmap, sections = 0;
5832 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5833 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5835 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5836 ch->num_queue_pairs = qcount;
5838 /* find the next higher power-of-2 of num queue pairs */
5839 pow = ilog2(qcount);
5840 if (!is_power_of_2(qcount))
5841 pow++;
5843 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5844 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
5846 /* Setup queue TC[0].qmap for given VSI context */
5847 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
5849 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
5850 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5851 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
5852 ctxt->info.valid_sections |= cpu_to_le16(sections);
5856 * i40e_add_channel - add a channel by adding VSI
5857 * @pf: ptr to PF device
5858 * @uplink_seid: underlying HW switching element (VEB) ID
5859 * @ch: ptr to channel structure
5861 * Add a channel (VSI) using add_vsi and queue_map
5863 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
5864 struct i40e_channel *ch)
5866 struct i40e_hw *hw = &pf->hw;
5867 struct i40e_vsi_context ctxt;
5868 u8 enabled_tc = 0x1; /* TC0 enabled */
5871 if (ch->type != I40E_VSI_VMDQ2) {
5872 dev_info(&pf->pdev->dev,
5873 "add new vsi failed, ch->type %d\n", ch->type);
5877 memset(&ctxt, 0, sizeof(ctxt));
5878 ctxt.pf_num = hw->pf_id;
5880 ctxt.uplink_seid = uplink_seid;
5881 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5882 if (ch->type == I40E_VSI_VMDQ2)
5883 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5885 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
5886 ctxt.info.valid_sections |=
5887 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5888 ctxt.info.switch_id =
5889 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5892 /* Set queue map for a given VSI context */
5893 i40e_channel_setup_queue_map(pf, &ctxt, ch);
5895 /* Now time to create VSI */
5896 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5898 dev_info(&pf->pdev->dev,
5899 "add new vsi failed, err %s aq_err %s\n",
5900 i40e_stat_str(&pf->hw, ret),
5901 i40e_aq_str(&pf->hw,
5902 pf->hw.aq.asq_last_status));
5906 /* Success, update channel, set enabled_tc only if the channel
5907 * is not a macvlan
5908 */
5909 ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
5910 ch->seid = ctxt.seid;
5911 ch->vsi_number = ctxt.vsi_number;
5912 ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
5914 /* copy just the sections touched, not the entire info,
5915 * since not all sections are valid as returned by
5916 * update vsi params
5917 */
5918 ch->info.mapping_flags = ctxt.info.mapping_flags;
5919 memcpy(&ch->info.queue_mapping,
5920 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
5921 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
5922 sizeof(ctxt.info.tc_mapping));
5927 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
5930 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5934 bw_data.tc_valid_bits = ch->enabled_tc;
5935 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5936 bw_data.tc_bw_credits[i] = bw_share[i];
5938 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
5941 dev_info(&vsi->back->pdev->dev,
5942 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
5943 vsi->back->hw.aq.asq_last_status, ch->seid);
5947 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5948 ch->info.qs_handle[i] = bw_data.qs_handles[i];
5954 * i40e_channel_config_tx_ring - config TX ring associated with new channel
5955 * @pf: ptr to PF device
5956 * @vsi: the VSI being setup
5957 * @ch: ptr to channel structure
5959 * Configure TX rings associated with channel (VSI) since queues are being
5962 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
5963 struct i40e_vsi *vsi,
5964 struct i40e_channel *ch)
5968 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5970 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5971 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5972 if (ch->enabled_tc & BIT(i))
5976 /* configure BW for new VSI */
5977 ret = i40e_channel_config_bw(vsi, ch, bw_share);
5979 dev_info(&vsi->back->pdev->dev,
5980 "Failed configuring TC map %d for channel (seid %u)\n",
5981 ch->enabled_tc, ch->seid);
5985 for (i = 0; i < ch->num_queue_pairs; i++) {
5986 struct i40e_ring *tx_ring, *rx_ring;
5989 pf_q = ch->base_queue + i;
5991 /* Get to TX ring ptr of main VSI, for re-setup TX queue
5992 * context
5993 */
5994 tx_ring = vsi->tx_rings[pf_q];
5997 /* Get the RX ring ptr */
5998 rx_ring = vsi->rx_rings[pf_q];
6006 * i40e_setup_hw_channel - setup new channel
6007 * @pf: ptr to PF device
6008 * @vsi: the VSI being setup
6009 * @ch: ptr to channel structure
6010 * @uplink_seid: underlying HW switching element (VEB) ID
6011 * @type: type of channel to be created (VMDq2/VF)
6013 * Setup new channel (VSI) based on specified type (VMDq2/VF)
6014 * and configures TX rings accordingly
6016 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
6017 struct i40e_vsi *vsi,
6018 struct i40e_channel *ch,
6019 u16 uplink_seid, u8 type)
6023 ch->initialized = false;
6024 ch->base_queue = vsi->next_base_queue;
6027 /* Proceed with creation of channel (VMDq2) VSI */
6028 ret = i40e_add_channel(pf, uplink_seid, ch);
6030 dev_info(&pf->pdev->dev,
6031 "failed to add_channel using uplink_seid %u\n",
6036 /* Mark the successful creation of channel */
6037 ch->initialized = true;
6039 /* Reconfigure TX queues using QTX_CTL register */
6040 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6042 dev_info(&pf->pdev->dev,
6043 "failed to configure TX rings for channel %u\n",
6048 /* update 'next_base_queue' */
6049 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6050 dev_dbg(&pf->pdev->dev,
6051 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6052 ch->seid, ch->vsi_number, ch->stat_counter_idx,
6053 ch->num_queue_pairs,
6054 vsi->next_base_queue);
6059 * i40e_setup_channel - setup new channel using uplink element
6060 * @pf: ptr to PF device
6061 * @type: type of channel to be created (VMDq2/VF)
6062 * @uplink_seid: underlying HW switching element (VEB) ID
6063 * @ch: ptr to channel structure
6065 * Setup new channel (VSI) based on specified type (VMDq2/VF)
6066 * and uplink switching element (uplink_seid)
6068 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6069 struct i40e_channel *ch)
6075 if (vsi->type == I40E_VSI_MAIN) {
6076 vsi_type = I40E_VSI_VMDQ2;
6078 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6083 /* underlying switching element */
6084 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6086 /* create channel (VSI), configure TX rings */
6087 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6089 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6093 return ch->initialized ? true : false;
6097 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6098 * @vsi: ptr to VSI which has PF backing
6100 * Sets up the switch mode correctly if it needs to be changed, restricting
6101 * it to the modes that are allowed.
6103 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6106 struct i40e_pf *pf = vsi->back;
6107 struct i40e_hw *hw = &pf->hw;
6110 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6114 if (hw->dev_caps.switch_mode) {
6115 /* if switch mode is set, support mode2 (non-tunneled for
6116 * cloud filter) for now
6118 u32 switch_mode = hw->dev_caps.switch_mode &
6119 I40E_SWITCH_MODE_MASK;
6120 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6121 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6123 dev_err(&pf->pdev->dev,
6124 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6125 hw->dev_caps.switch_mode);
6130 /* Set Bit 7 to be valid */
6131 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6133 /* Set L4type for TCP support */
6134 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6136 /* Set cloud filter mode */
6137 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6139 /* Prep mode field for set_switch_config */
6140 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6141 pf->last_sw_conf_valid_flags,
6143 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6144 dev_err(&pf->pdev->dev,
6145 "couldn't set switch config bits, err %s aq_err %s\n",
6146 i40e_stat_str(hw, ret),
6148 hw->aq.asq_last_status));
6154 * i40e_create_queue_channel - function to create channel
6155 * @vsi: VSI to be configured
6156 * @ch: ptr to channel (it contains channel specific params)
6158 * This function creates a channel (VSI) using the num_queues specified by
6159 * the user and reconfigures RSS if needed.
6161 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6162 struct i40e_channel *ch)
6164 struct i40e_pf *pf = vsi->back;
6171 if (!ch->num_queue_pairs) {
6172 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6173 ch->num_queue_pairs);
6177 /* validate user requested num_queues for channel */
6178 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6181 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6182 ch->num_queue_pairs);
6186 /* By default we are in VEPA mode, if this is the first VF/VMDq
6187 * VSI to be added switch to VEB mode.
6189 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
6190 (!i40e_is_any_channel(vsi))) {
6191 if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
6192 dev_dbg(&pf->pdev->dev,
6193 "Failed to create channel. Override queues (%u) not power of 2\n",
6194 vsi->tc_config.tc_info[0].qcount);
6198 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6199 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6201 if (vsi->type == I40E_VSI_MAIN) {
6202 if (pf->flags & I40E_FLAG_TC_MQPRIO)
6203 i40e_do_reset(pf, I40E_PF_RESET_FLAG,
6206 i40e_do_reset_safe(pf,
6207 I40E_PF_RESET_FLAG);
6210 /* From now on, for the main VSI the number of queues will be the
6211 * value of TC0's queue count.
6212 */
6215 /* By this time, vsi->cnt_q_avail shall be set to non-zero and
6216 * it should be more than num_queues
6218 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6219 dev_dbg(&pf->pdev->dev,
6220 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6221 vsi->cnt_q_avail, ch->num_queue_pairs);
6225 /* reconfig_rss only if vsi type is MAIN_VSI */
6226 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6227 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6229 dev_info(&pf->pdev->dev,
6230 "Error: unable to reconfig rss for num_queues (%u)\n",
6231 ch->num_queue_pairs);
6236 if (!i40e_setup_channel(pf, vsi, ch)) {
6237 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6241 dev_info(&pf->pdev->dev,
6242 "Setup channel (id:%u) utilizing num_queues %d\n",
6243 ch->seid, ch->num_queue_pairs);
6245 /* configure VSI for BW limit */
6246 if (ch->max_tx_rate) {
6247 u64 credits = ch->max_tx_rate;
6249 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6252 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6253 dev_dbg(&pf->pdev->dev,
6254 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6260 /* in case of VF, this will be main SRIOV VSI */
6261 ch->parent_vsi = vsi;
6263 /* and update main_vsi's count for queue_available to use */
6264 vsi->cnt_q_avail -= ch->num_queue_pairs;
6270 * i40e_configure_queue_channels - Add queue channel for the given TCs
6271 * @vsi: VSI to be configured
6273 * Configures queue channel mapping to the given TCs
6275 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6277 struct i40e_channel *ch;
6281 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6282 vsi->tc_seid_map[0] = vsi->seid;
6283 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6284 if (vsi->tc_config.enabled_tc & BIT(i)) {
6285 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6291 INIT_LIST_HEAD(&ch->list);
6292 ch->num_queue_pairs =
6293 vsi->tc_config.tc_info[i].qcount;
6295 vsi->tc_config.tc_info[i].qoffset;
6297 /* Bandwidth limit through tc interface is in bytes/s,
6298 * convert to Mbps (see the sketch after this function)
6299 */
6300 max_rate = vsi->mqprio_qopt.max_rate[i];
6301 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6302 ch->max_tx_rate = max_rate;
6304 list_add_tail(&ch->list, &vsi->ch_list);
6306 ret = i40e_create_queue_channel(vsi, ch);
6308 dev_err(&vsi->back->pdev->dev,
6309 "Failed creating queue channel with TC%d: queues %d\n",
6310 i, ch->num_queue_pairs);
6311 goto err_free;
6313 vsi->tc_seid_map[i] = ch->seid;
6314 }
6315 }
6316 return 0;
6318 err_free:
6319 i40e_remove_queue_channels(vsi);
6320 return ret;
6324 * i40e_veb_config_tc - Configure TCs for given VEB
6326 * @enabled_tc: TC bitmap
6328 * Configures given TC bitmap for VEB (switching) element
6330 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6332 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6333 struct i40e_pf *pf = veb->pf;
6337 /* No TCs or already enabled TCs just return */
6338 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6341 bw_data.tc_valid_bits = enabled_tc;
6342 /* bw_data.absolute_credits is not set (relative) */
6344 /* Enable ETS TCs with equal BW Share for now */
6345 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6346 if (enabled_tc & BIT(i))
6347 bw_data.tc_bw_share_credits[i] = 1;
6350 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6353 dev_info(&pf->pdev->dev,
6354 "VEB bw config failed, err %s aq_err %s\n",
6355 i40e_stat_str(&pf->hw, ret),
6356 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6360 /* Update the BW information */
6361 ret = i40e_veb_get_bw_info(veb);
6363 dev_info(&pf->pdev->dev,
6364 "Failed getting veb bw config, err %s aq_err %s\n",
6365 i40e_stat_str(&pf->hw, ret),
6366 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6373 #ifdef CONFIG_I40E_DCB
6375 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6378 * Reconfigure VEB/VSIs on a given PF; it is assumed that
6379 * the caller has quiesced all the VSIs before calling this function
6382 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6388 /* Enable the TCs available on PF to all VEBs */
6389 tc_map = i40e_pf_get_tc_map(pf);
6390 for (v = 0; v < I40E_MAX_VEB; v++) {
6393 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6395 dev_info(&pf->pdev->dev,
6396 "Failed configuring TC for VEB seid=%d\n",
6398 /* Will try to configure as many components as possible */
6402 /* Update each VSI */
6403 for (v = 0; v < pf->num_alloc_vsi; v++) {
6407 /* - Enable all TCs for the LAN VSI
6408 * - For all others keep them at TC0 for now
6410 if (v == pf->lan_vsi)
6411 tc_map = i40e_pf_get_tc_map(pf);
6413 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6415 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6417 dev_info(&pf->pdev->dev,
6418 "Failed configuring TC for VSI seid=%d\n",
6420 /* Will try to configure as many components as possible */
6422 /* Re-configure VSI vectors based on updated TC map */
6423 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6424 if (pf->vsi[v]->netdev)
6425 i40e_dcbnl_set_all(pf->vsi[v]);
6431 * i40e_resume_port_tx - Resume port Tx
6434 * Resume a port's Tx and issue a PF reset in case of failure to resume.
6437 static int i40e_resume_port_tx(struct i40e_pf *pf)
6439 struct i40e_hw *hw = &pf->hw;
6442 ret = i40e_aq_resume_port_tx(hw, NULL);
6444 dev_info(&pf->pdev->dev,
6445 "Resume Port Tx failed, err %s aq_err %s\n",
6446 i40e_stat_str(&pf->hw, ret),
6447 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6448 /* Schedule PF reset to recover */
6449 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6450 i40e_service_event_schedule(pf);
6457 * i40e_init_pf_dcb - Initialize DCB configuration
6458 * @pf: PF being configured
6460 * Query the current DCB configuration and cache it
6461 * in the hardware structure
6463 static int i40e_init_pf_dcb(struct i40e_pf *pf)
6465 struct i40e_hw *hw = &pf->hw;
6468 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable
6469 * Also do not enable DCBx if FW LLDP agent is disabled
6471 if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
6472 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) {
6473 dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n");
6474 err = I40E_NOT_SUPPORTED;
6478 err = i40e_init_dcb(hw, true);
6480 /* Device/Function is not DCBX capable */
6481 if ((!hw->func_caps.dcb) ||
6482 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6483 dev_info(&pf->pdev->dev,
6484 "DCBX offload is not supported or is disabled for this PF.\n");
6486 /* When status is not DISABLED, DCBX is managed in FW */
6487 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6488 DCB_CAP_DCBX_VER_IEEE;
6490 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6491 /* Enable DCB tagging only when more than one TC
6492 * or explicitly disable if only one TC
6494 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6495 pf->flags |= I40E_FLAG_DCB_ENABLED;
6497 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6498 dev_dbg(&pf->pdev->dev,
6499 "DCBX offload is supported for this PF.\n");
6501 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
6502 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
6503 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
6505 dev_info(&pf->pdev->dev,
6506 "Query for DCB configuration failed, err %s aq_err %s\n",
6507 i40e_stat_str(&pf->hw, err),
6508 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6514 #endif /* CONFIG_I40E_DCB */
6517 * i40e_print_link_message - print link up or down
6518 * @vsi: the VSI for which link needs a message
6519 * @isup: true if link is up, false otherwise
6521 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6523 enum i40e_aq_link_speed new_speed;
6524 struct i40e_pf *pf = vsi->back;
6525 char *speed = "Unknown";
6526 char *fc = "Unknown";
6532 new_speed = pf->hw.phy.link_info.link_speed;
6534 new_speed = I40E_LINK_SPEED_UNKNOWN;
6536 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
6538 vsi->current_isup = isup;
6539 vsi->current_speed = new_speed;
6541 netdev_info(vsi->netdev, "NIC Link is Down\n");
6545 /* Warn user if link speed on NPAR enabled partition is not at least 10Gbps */
6548 if (pf->hw.func_caps.npar_enable &&
6549 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
6550 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
6551 netdev_warn(vsi->netdev,
6552 "The partition detected link speed that is less than 10Gbps\n");
6554 switch (pf->hw.phy.link_info.link_speed) {
6555 case I40E_LINK_SPEED_40GB:
6558 case I40E_LINK_SPEED_20GB:
6561 case I40E_LINK_SPEED_25GB:
6564 case I40E_LINK_SPEED_10GB:
6567 case I40E_LINK_SPEED_5GB:
6570 case I40E_LINK_SPEED_2_5GB:
6573 case I40E_LINK_SPEED_1GB:
6576 case I40E_LINK_SPEED_100MB:
6583 switch (pf->hw.fc.current_mode) {
6587 case I40E_FC_TX_PAUSE:
6590 case I40E_FC_RX_PAUSE:
6598 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
6603 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
6606 if (pf->hw.phy.link_info.fec_info &
6607 I40E_AQ_CONFIG_FEC_KR_ENA)
6608 fec = "CL74 FC-FEC/BASE-R";
6609 else if (pf->hw.phy.link_info.fec_info &
6610 I40E_AQ_CONFIG_FEC_RS_ENA)
6611 fec = "CL108 RS-FEC";
6613 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
6614 * when both RS and FC are requested
6616 if (vsi->back->hw.phy.link_info.req_fec_info &
6617 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
6618 if (vsi->back->hw.phy.link_info.req_fec_info &
6619 I40E_AQ_REQUEST_FEC_RS)
6620 req_fec = "CL108 RS-FEC";
6622 req_fec = "CL74 FC-FEC/BASE-R";
6624 netdev_info(vsi->netdev,
6625 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
6626 speed, req_fec, fec, an, fc);
6628 netdev_info(vsi->netdev,
6629 "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
6636 * i40e_up_complete - Finish the last steps of bringing up a connection
6637 * @vsi: the VSI being configured
6639 static int i40e_up_complete(struct i40e_vsi *vsi)
6641 struct i40e_pf *pf = vsi->back;
6644 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6645 i40e_vsi_configure_msix(vsi);
6647 i40e_configure_msi_and_legacy(vsi);
6650 err = i40e_vsi_start_rings(vsi);
6654 clear_bit(__I40E_VSI_DOWN, vsi->state);
6655 i40e_napi_enable_all(vsi);
6656 i40e_vsi_enable_irq(vsi);
6658 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
6660 i40e_print_link_message(vsi, true);
6661 netif_tx_start_all_queues(vsi->netdev);
6662 netif_carrier_on(vsi->netdev);
6665 /* replay FDIR SB filters */
6666 if (vsi->type == I40E_VSI_FDIR) {
6667 /* reset fd counters */
6670 i40e_fdir_filter_restore(vsi);
6673 /* On the next run of the service_task, notify any clients of the newly opened netdev */
6676 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
6677 i40e_service_event_schedule(pf);
6683 * i40e_vsi_reinit_locked - Reset the VSI
6684 * @vsi: the VSI being configured
6686 * Rebuild the ring structs after some configuration
6687 * has changed, e.g. MTU size.
6689 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
6691 struct i40e_pf *pf = vsi->back;
6693 WARN_ON(in_interrupt());
6694 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
6695 usleep_range(1000, 2000);
6699 clear_bit(__I40E_CONFIG_BUSY, pf->state);
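/* The test_and_set_bit()/clear_bit() pair above is a simple sleeping
 * lock on __I40E_CONFIG_BUSY; a minimal sketch of the pattern, with
 * MY_BUSY_BIT as a placeholder bit in a generic state bitmap:
 *
 *   while (test_and_set_bit(MY_BUSY_BIT, state))
 *           usleep_range(1000, 2000);    // poll roughly every 1-2 ms
 *   ...critical section (here: bringing the VSI down and back up)...
 *   clear_bit(MY_BUSY_BIT, state);
 */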
6703 * i40e_force_link_state - Force the link status
6704 * @pf: board private structure
6705 * @is_up: whether the link state should be forced up or down
6707 static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
6709 struct i40e_aq_get_phy_abilities_resp abilities;
6710 struct i40e_aq_set_phy_config config = {0};
6711 bool non_zero_phy_type = is_up;
6712 struct i40e_hw *hw = &pf->hw;
6717 /* Card might've been put in an unstable state by other drivers
6718 * and applications, which causes incorrect speed values to be
6719 * set on startup. In order to clear speed registers, we call
6720 * get_phy_capabilities twice, once to get initial state of
6721 * available speeds, and once to get current PHY config.
6723 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
6726 dev_err(&pf->pdev->dev,
6727 "failed to get phy cap., ret = %s last_status = %s\n",
6728 i40e_stat_str(hw, err),
6729 i40e_aq_str(hw, hw->aq.asq_last_status));
6732 speed = abilities.link_speed;
6734 /* Get the current phy config */
6735 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
6738 dev_err(&pf->pdev->dev,
6739 "failed to get phy cap., ret = %s last_status = %s\n",
6740 i40e_stat_str(hw, err),
6741 i40e_aq_str(hw, hw->aq.asq_last_status));
6745 /* If link needs to go up, but was not forced to go down,
6746 * and its speed values are OK, there is no need for a flap;
6747 * if non_zero_phy_type was set, we still need to force the link up
6749 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
6750 non_zero_phy_type = true;
6751 else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
6752 return I40E_SUCCESS;
6754 /* To force link we need to set bits for all supported PHY types,
6755 * but there are now more than 32, so we need to split the bitmap
6756 * across two fields.
6758 mask = I40E_PHY_TYPES_BITMASK;
6760 non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
6761 config.phy_type_ext =
6762 non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
6763 /* Copy the old settings, except for phy_type */
6764 config.abilities = abilities.abilities;
6765 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
6767 config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
6769 config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
6771 if (abilities.link_speed != 0)
6772 config.link_speed = abilities.link_speed;
6774 config.link_speed = speed;
6775 config.eee_capability = abilities.eee_capability;
6776 config.eeer = abilities.eeer_val;
6777 config.low_power_ctrl = abilities.d3_lpan;
6778 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
6779 I40E_AQ_PHY_FEC_CONFIG_MASK;
6780 err = i40e_aq_set_phy_config(hw, &config, NULL);
6783 dev_err(&pf->pdev->dev,
6784 "set phy config ret = %s last_status = %s\n",
6785 i40e_stat_str(&pf->hw, err),
6786 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6790 /* Update the link info */
6791 err = i40e_update_link_info(hw);
6793 /* Wait a little bit (on 40G cards it sometimes takes a really
6794 * long time for link to come back from the atomic reset)
6798 i40e_update_link_info(hw);
6801 i40e_aq_set_link_restart_an(hw, is_up, NULL);
6803 return I40E_SUCCESS;
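/* Sketch of the PHY type bitmap split used above: I40E_PHY_TYPES_BITMASK
 * is wider than 32 bits, so the low word goes into config.phy_type and
 * the next byte into config.phy_type_ext:
 *
 *   u64 mask = I40E_PHY_TYPES_BITMASK;
 *   u32 lo = (u32)(mask & 0xffffffff);   // -> config.phy_type
 *   u8  hi = (u8)((mask >> 32) & 0xff);  // -> config.phy_type_ext
 */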
6807 * i40e_up - Bring the connection back up after being down
6808 * @vsi: the VSI being configured
6810 int i40e_up(struct i40e_vsi *vsi)
6814 if (vsi->type == I40E_VSI_MAIN &&
6815 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
6816 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
6817 i40e_force_link_state(vsi->back, true);
6819 err = i40e_vsi_configure(vsi);
6821 err = i40e_up_complete(vsi);
6827 * i40e_down - Shutdown the connection processing
6828 * @vsi: the VSI being stopped
6830 void i40e_down(struct i40e_vsi *vsi)
6834 /* It is assumed that the caller of this function
6835 * sets the vsi->state __I40E_VSI_DOWN bit.
6838 netif_carrier_off(vsi->netdev);
6839 netif_tx_disable(vsi->netdev);
6841 i40e_vsi_disable_irq(vsi);
6842 i40e_vsi_stop_rings(vsi);
6843 if (vsi->type == I40E_VSI_MAIN &&
6844 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
6845 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
6846 i40e_force_link_state(vsi->back, false);
6847 i40e_napi_disable_all(vsi);
6849 for (i = 0; i < vsi->num_queue_pairs; i++) {
6850 i40e_clean_tx_ring(vsi->tx_rings[i]);
6851 if (i40e_enabled_xdp_vsi(vsi)) {
6852 /* Make sure that in-progress ndo_xdp_xmit and
6853 * ndo_xsk_wakeup calls are completed.
6856 i40e_clean_tx_ring(vsi->xdp_rings[i]);
6858 i40e_clean_rx_ring(vsi->rx_rings[i]);
6864 * i40e_validate_mqprio_qopt- validate queue mapping info
6865 * @vsi: the VSI being configured
6866 * @mqprio_qopt: queue parameters
6868 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
6869 struct tc_mqprio_qopt_offload *mqprio_qopt)
6871 u64 sum_max_rate = 0;
6875 if (mqprio_qopt->qopt.offset[0] != 0 ||
6876 mqprio_qopt->qopt.num_tc < 1 ||
6877 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
6879 for (i = 0; ; i++) {
6880 if (!mqprio_qopt->qopt.count[i])
6882 if (mqprio_qopt->min_rate[i]) {
6883 dev_err(&vsi->back->pdev->dev,
6884 "Invalid min tx rate (greater than 0) specified\n");
6887 max_rate = mqprio_qopt->max_rate[i];
6888 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6889 sum_max_rate += max_rate;
6891 if (i >= mqprio_qopt->qopt.num_tc - 1)
6893 if (mqprio_qopt->qopt.offset[i + 1] !=
6894 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
6897 if (vsi->num_queue_pairs <
6898 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
6901 if (sum_max_rate > i40e_get_link_speed(vsi)) {
6902 dev_err(&vsi->back->pdev->dev,
6903 "Invalid max tx rate specified\n");
6910 * i40e_vsi_set_default_tc_config - set default values for tc configuration
6911 * @vsi: the VSI being configured
6913 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
6918 /* Only TC0 is enabled */
6919 vsi->tc_config.numtc = 1;
6920 vsi->tc_config.enabled_tc = 1;
6921 qcount = min_t(int, vsi->alloc_queue_pairs,
6922 i40e_pf_get_max_q_per_tc(vsi->back));
6923 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6924 /* For the TC that is not enabled set the offset to the default
6925 * queue and allocate one queue for the given TC.
6927 vsi->tc_config.tc_info[i].qoffset = 0;
6929 vsi->tc_config.tc_info[i].qcount = qcount;
6931 vsi->tc_config.tc_info[i].qcount = 1;
6932 vsi->tc_config.tc_info[i].netdev_tc = 0;
6937 * i40e_del_macvlan_filter
6938 * @hw: pointer to the HW structure
6939 * @seid: seid of the channel VSI
6940 * @macaddr: the mac address to apply as a filter
6941 * @aq_err: store the admin Q error
6943 * This function deletes a mac filter on the channel VSI which serves as the
6944 * macvlan. Returns 0 on success.
6946 static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
6947 const u8 *macaddr, int *aq_err)
6949 struct i40e_aqc_remove_macvlan_element_data element;
6952 memset(&element, 0, sizeof(element));
6953 ether_addr_copy(element.mac_addr, macaddr);
6954 element.vlan_tag = 0;
6955 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6956 status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
6957 *aq_err = hw->aq.asq_last_status;
6963 * i40e_add_macvlan_filter
6964 * @hw: pointer to the HW structure
6965 * @seid: seid of the channel VSI
6966 * @macaddr: the mac address to apply as a filter
6967 * @aq_err: store the admin Q error
6969 * This function adds a mac filter on the channel VSI which serves as the
6970 * macvlan. Returns 0 on success.
6972 static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
6973 const u8 *macaddr, int *aq_err)
6975 struct i40e_aqc_add_macvlan_element_data element;
6979 ether_addr_copy(element.mac_addr, macaddr);
6980 element.vlan_tag = 0;
6981 element.queue_number = 0;
6982 element.match_method = I40E_AQC_MM_ERR_NO_RES;
6983 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6984 element.flags = cpu_to_le16(cmd_flags);
6985 status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
6986 *aq_err = hw->aq.asq_last_status;
6992 * i40e_reset_ch_rings - Reset the queue contexts in a channel
6993 * @vsi: the VSI we want to access
6994 * @ch: the channel we want to access
6996 static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
6998 struct i40e_ring *tx_ring, *rx_ring;
7002 for (i = 0; i < ch->num_queue_pairs; i++) {
7003 pf_q = ch->base_queue + i;
7004 tx_ring = vsi->tx_rings[pf_q];
7006 rx_ring = vsi->rx_rings[pf_q];
7012 * i40e_free_macvlan_channels
7013 * @vsi: the VSI we want to access
7015 * This function frees the Qs of the channel VSI from
7016 * the stack and also deletes the channel VSIs which
7017 * serve as macvlans.
7019 static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
7021 struct i40e_channel *ch, *ch_tmp;
7024 if (list_empty(&vsi->macvlan_list))
7027 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7028 struct i40e_vsi *parent_vsi;
7030 if (i40e_is_channel_macvlan(ch)) {
7031 i40e_reset_ch_rings(vsi, ch);
7032 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7033 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
7034 netdev_set_sb_channel(ch->fwd->netdev, 0);
7039 list_del(&ch->list);
7040 parent_vsi = ch->parent_vsi;
7041 if (!parent_vsi || !ch->initialized) {
7046 /* remove the VSI */
7047 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
7050 dev_err(&vsi->back->pdev->dev,
7051 "unable to remove channel (%d) for parent VSI(%d)\n",
7052 ch->seid, parent_vsi->seid);
7055 vsi->macvlan_cnt = 0;
7059 * i40e_fwd_ring_up - bring the macvlan device up
7060 * @vsi: the VSI we want to access
7061 * @vdev: macvlan netdevice
7062 * @fwd: the private fwd structure
7064 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
7065 struct i40e_fwd_adapter *fwd)
7067 int ret = 0, num_tc = 1, i, aq_err;
7068 struct i40e_channel *ch, *ch_tmp;
7069 struct i40e_pf *pf = vsi->back;
7070 struct i40e_hw *hw = &pf->hw;
7072 if (list_empty(&vsi->macvlan_list))
7075 /* Go through the list and find an available channel */
7076 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7077 if (!i40e_is_channel_macvlan(ch)) {
7079 /* record configuration for macvlan interface in vdev */
7080 for (i = 0; i < num_tc; i++)
7081 netdev_bind_sb_channel_queue(vsi->netdev, vdev,
7083 ch->num_queue_pairs,
7085 for (i = 0; i < ch->num_queue_pairs; i++) {
7086 struct i40e_ring *tx_ring, *rx_ring;
7089 pf_q = ch->base_queue + i;
7091 /* Get to TX ring ptr */
7092 tx_ring = vsi->tx_rings[pf_q];
7095 /* Get the RX ring ptr */
7096 rx_ring = vsi->rx_rings[pf_q];
7103 /* Guarantee all rings are updated before we update the
7104 * MAC address filter.
7108 /* Add a mac filter */
7109 ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
7111 /* if we cannot add the MAC rule then disable the offload */
7112 macvlan_release_l2fw_offload(vdev);
7113 for (i = 0; i < ch->num_queue_pairs; i++) {
7114 struct i40e_ring *rx_ring;
7117 pf_q = ch->base_queue + i;
7118 rx_ring = vsi->rx_rings[pf_q];
7119 rx_ring->netdev = NULL;
7121 dev_info(&pf->pdev->dev,
7122 "Error adding mac filter on macvlan err %s, aq_err %s\n",
7123 i40e_stat_str(hw, ret),
7124 i40e_aq_str(hw, aq_err));
7125 netdev_err(vdev, "L2fwd offload disabled due to L2 filter error\n");
7132 * i40e_setup_macvlans - create the channels which will be macvlans
7133 * @vsi: the VSI we want to access
7134 * @macvlan_cnt: number of macvlans to be set up
7135 * @qcnt: number of queues per macvlan
7136 * @vdev: macvlan netdevice
7138 static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
7139 struct net_device *vdev)
7141 struct i40e_pf *pf = vsi->back;
7142 struct i40e_hw *hw = &pf->hw;
7143 struct i40e_vsi_context ctxt;
7144 u16 sections, qmap, num_qps;
7145 struct i40e_channel *ch;
7146 int i, pow, ret = 0;
7149 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
7152 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
7154 /* find the next higher power-of-2 of num queue pairs */
7155 pow = fls(roundup_pow_of_two(num_qps) - 1);
7157 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
7158 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
7160 /* Setup context bits for the main VSI */
7161 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
7162 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
7163 memset(&ctxt, 0, sizeof(ctxt));
7164 ctxt.seid = vsi->seid;
7165 ctxt.pf_num = vsi->back->hw.pf_id;
7167 ctxt.uplink_seid = vsi->uplink_seid;
7168 ctxt.info = vsi->info;
7169 ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
7170 ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
7171 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
7172 ctxt.info.valid_sections |= cpu_to_le16(sections);
7174 /* Reconfigure RSS for main VSI with new max queue count */
7175 vsi->rss_size = max_t(u16, num_qps, qcnt);
7176 ret = i40e_vsi_config_rss(vsi);
7178 dev_info(&pf->pdev->dev,
7179 "Failed to reconfig RSS for num_queues (%u)\n",
7183 vsi->reconfig_rss = true;
7184 dev_dbg(&vsi->back->pdev->dev,
7185 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
7186 vsi->next_base_queue = num_qps;
7187 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
7189 /* Update the VSI after updating the VSI queue-mapping information */
7192 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7194 dev_info(&pf->pdev->dev,
7195 "Update vsi tc config failed, err %s aq_err %s\n",
7196 i40e_stat_str(hw, ret),
7197 i40e_aq_str(hw, hw->aq.asq_last_status));
7200 /* update the local VSI info with updated queue map */
7201 i40e_vsi_update_queue_map(vsi, &ctxt);
7202 vsi->info.valid_sections = 0;
7204 /* Create channels for macvlans */
7205 INIT_LIST_HEAD(&vsi->macvlan_list);
7206 for (i = 0; i < macvlan_cnt; i++) {
7207 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
7212 INIT_LIST_HEAD(&ch->list);
7213 ch->num_queue_pairs = qcnt;
7214 if (!i40e_setup_channel(pf, vsi, ch)) {
7219 ch->parent_vsi = vsi;
7220 vsi->cnt_q_avail -= ch->num_queue_pairs;
7222 list_add_tail(&ch->list, &vsi->macvlan_list);
7228 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
7229 i40e_free_macvlan_channels(vsi);
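/* Worked example of the queue-map math above (values assumed): with
 * vsi->num_queue_pairs = 16, macvlan_cnt = 4 and qcnt = 2, num_qps is
 * 16 - (4 * 2) = 8 and pow = fls(roundup_pow_of_two(8) - 1) = 3, so the
 * main VSI keeps a power-of-two block of 8 queues and each macvlan
 * channel is given 2 of the remaining queues.
 */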
7235 * i40e_fwd_add - configure macvlans
7236 * @netdev: net device to configure
7237 * @vdev: macvlan netdevice
7239 static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
7241 struct i40e_netdev_priv *np = netdev_priv(netdev);
7242 u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
7243 struct i40e_vsi *vsi = np->vsi;
7244 struct i40e_pf *pf = vsi->back;
7245 struct i40e_fwd_adapter *fwd;
7246 int avail_macvlan, ret;
7248 if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
7249 netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
7250 return ERR_PTR(-EINVAL);
7252 if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
7253 netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
7254 return ERR_PTR(-EINVAL);
7256 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
7257 netdev_info(netdev, "Not enough vectors available to support macvlans\n");
7258 return ERR_PTR(-EINVAL);
7261 /* The macvlan device has to be a single Q device so that the
7262 * tc_to_txq field can be reused to pick the tx queue.
7264 if (netif_is_multiqueue(vdev))
7265 return ERR_PTR(-ERANGE);
7267 if (!vsi->macvlan_cnt) {
7268 /* reserve bit 0 for the pf device */
7269 set_bit(0, vsi->fwd_bitmask);
7271 /* Try to reserve as many queues as possible for macvlans. First
7272 * reserve 3/4th of max vectors, then half, then quarter and
7273 * calculate Qs per macvlan as you go
7275 vectors = pf->num_lan_msix;
7276 if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
7277 /* allocate 4 Qs per macvlan and 32 Qs to the PF*/
7279 macvlan_cnt = (vectors - 32) / 4;
7280 } else if (vectors <= 64 && vectors > 32) {
7281 /* allocate 2 Qs per macvlan and 16 Qs to the PF*/
7283 macvlan_cnt = (vectors - 16) / 2;
7284 } else if (vectors <= 32 && vectors > 16) {
7285 /* allocate 1 Q per macvlan and 16 Qs to the PF*/
7287 macvlan_cnt = vectors - 16;
7288 } else if (vectors <= 16 && vectors > 8) {
7289 /* allocate 1 Q per macvlan and 8 Qs to the PF */
7291 macvlan_cnt = vectors - 8;
7293 /* allocate 1 Q per macvlan and 1 Q to the PF */
7295 macvlan_cnt = vectors - 1;
7298 if (macvlan_cnt == 0)
7299 return ERR_PTR(-EBUSY);
7301 /* Quiesce VSI queues */
7302 i40e_quiesce_vsi(vsi);
7304 /* sets up the macvlans but does not "enable" them */
7305 ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
7308 return ERR_PTR(ret);
7311 i40e_unquiesce_vsi(vsi);
7313 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
7315 if (avail_macvlan >= I40E_MAX_MACVLANS)
7316 return ERR_PTR(-EBUSY);
7318 /* create the fwd struct */
7319 fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
7321 return ERR_PTR(-ENOMEM);
7323 set_bit(avail_macvlan, vsi->fwd_bitmask);
7324 fwd->bit_no = avail_macvlan;
7325 netdev_set_sb_channel(vdev, avail_macvlan);
7328 if (!netif_running(netdev))
7331 /* Set fwd ring up */
7332 ret = i40e_fwd_ring_up(vsi, vdev, fwd);
7334 /* unbind the queues and drop the subordinate channel config */
7335 netdev_unbind_sb_channel(netdev, vdev);
7336 netdev_set_sb_channel(vdev, 0);
7339 return ERR_PTR(-EINVAL);
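/* Worked example of the vector-based split above (values assumed): with
 * pf->num_lan_msix = 48, the "vectors <= 64 && vectors > 32" branch
 * gives q_per_macvlan = 2 and macvlan_cnt = (48 - 16) / 2 = 16. From
 * userspace the offload is typically exercised via a macvlan upper
 * device (illustrative commands, names are placeholders):
 *
 *   ethtool -K eth0 l2-fwd-offload on
 *   ip link add link eth0 macvlan0 type macvlan mode bridge
 */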
7346 * i40e_del_all_macvlans - Delete all the mac filters on the channels
7347 * @vsi: the VSI we want to access
7349 static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
7351 struct i40e_channel *ch, *ch_tmp;
7352 struct i40e_pf *pf = vsi->back;
7353 struct i40e_hw *hw = &pf->hw;
7354 int aq_err, ret = 0;
7356 if (list_empty(&vsi->macvlan_list))
7359 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7360 if (i40e_is_channel_macvlan(ch)) {
7361 ret = i40e_del_macvlan_filter(hw, ch->seid,
7362 i40e_channel_mac(ch),
7365 /* Reset queue contexts */
7366 i40e_reset_ch_rings(vsi, ch);
7367 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7368 netdev_unbind_sb_channel(vsi->netdev,
7370 netdev_set_sb_channel(ch->fwd->netdev, 0);
7379 * i40e_fwd_del - delete macvlan interfaces
7380 * @netdev: net device to configure
7381 * @vdev: macvlan netdevice
7383 static void i40e_fwd_del(struct net_device *netdev, void *vdev)
7385 struct i40e_netdev_priv *np = netdev_priv(netdev);
7386 struct i40e_fwd_adapter *fwd = vdev;
7387 struct i40e_channel *ch, *ch_tmp;
7388 struct i40e_vsi *vsi = np->vsi;
7389 struct i40e_pf *pf = vsi->back;
7390 struct i40e_hw *hw = &pf->hw;
7391 int aq_err, ret = 0;
7393 /* Find the channel associated with the macvlan and del mac filter */
7394 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7395 if (i40e_is_channel_macvlan(ch) &&
7396 ether_addr_equal(i40e_channel_mac(ch),
7397 fwd->netdev->dev_addr)) {
7398 ret = i40e_del_macvlan_filter(hw, ch->seid,
7399 i40e_channel_mac(ch),
7402 /* Reset queue contexts */
7403 i40e_reset_ch_rings(vsi, ch);
7404 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7405 netdev_unbind_sb_channel(netdev, fwd->netdev);
7406 netdev_set_sb_channel(fwd->netdev, 0);
7410 dev_info(&pf->pdev->dev,
7411 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
7412 i40e_stat_str(hw, ret),
7413 i40e_aq_str(hw, aq_err));
7421 * i40e_setup_tc - configure multiple traffic classes
7422 * @netdev: net device to configure
7423 * @type_data: tc offload data
7425 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
7427 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
7428 struct i40e_netdev_priv *np = netdev_priv(netdev);
7429 struct i40e_vsi *vsi = np->vsi;
7430 struct i40e_pf *pf = vsi->back;
7431 u8 enabled_tc = 0, num_tc, hw;
7432 bool need_reset = false;
7433 int old_queue_pairs;
7438 old_queue_pairs = vsi->num_queue_pairs;
7439 num_tc = mqprio_qopt->qopt.num_tc;
7440 hw = mqprio_qopt->qopt.hw;
7441 mode = mqprio_qopt->mode;
7443 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7444 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
7448 /* Check if MFP enabled */
7449 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
7451 "Configuring TC not supported in MFP mode\n");
7455 case TC_MQPRIO_MODE_DCB:
7456 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7458 /* Check if DCB enabled to continue */
7459 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
7461 "DCB is not enabled for adapter\n");
7465 /* Check whether tc count is within enabled limit */
7466 if (num_tc > i40e_pf_get_num_tc(pf)) {
7468 "TC count greater than enabled on link for adapter\n");
7472 case TC_MQPRIO_MODE_CHANNEL:
7473 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
7475 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
7478 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7480 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
7483 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
7484 sizeof(*mqprio_qopt));
7485 pf->flags |= I40E_FLAG_TC_MQPRIO;
7486 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7493 /* Generate TC map for number of tc requested */
7494 for (i = 0; i < num_tc; i++)
7495 enabled_tc |= BIT(i);
7497 /* Requesting same TC configuration as already enabled */
7498 if (enabled_tc == vsi->tc_config.enabled_tc &&
7499 mode != TC_MQPRIO_MODE_CHANNEL)
7502 /* Quiesce VSI queues */
7503 i40e_quiesce_vsi(vsi);
7505 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
7506 i40e_remove_queue_channels(vsi);
7508 /* Configure VSI for enabled TCs */
7509 ret = i40e_vsi_config_tc(vsi, enabled_tc);
7511 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
7516 dev_info(&vsi->back->pdev->dev,
7517 "Setup channel (id:%u) utilizing num_queues %d\n",
7518 vsi->seid, vsi->tc_config.tc_info[0].qcount);
7521 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
7522 if (vsi->mqprio_qopt.max_rate[0]) {
7523 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
7525 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
7526 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
7528 u64 credits = max_tx_rate;
7530 do_div(credits, I40E_BW_CREDIT_DIVISOR);
7531 dev_dbg(&vsi->back->pdev->dev,
7532 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
7541 ret = i40e_configure_queue_channels(vsi);
7543 vsi->num_queue_pairs = old_queue_pairs;
7545 "Failed configuring queue channels\n");
7552 /* Reset the configuration data to defaults, only TC0 is enabled */
7554 i40e_vsi_set_default_tc_config(vsi);
7559 i40e_unquiesce_vsi(vsi);
7564 * i40e_set_cld_element - sets cloud filter element data
7565 * @filter: cloud filter rule
7566 * @cld: ptr to cloud filter element data
7568 * This is a helper function to copy data into the cloud filter element
7571 i40e_set_cld_element(struct i40e_cloud_filter *filter,
7572 struct i40e_aqc_cloud_filters_element_data *cld)
7577 memset(cld, 0, sizeof(*cld));
7578 ether_addr_copy(cld->outer_mac, filter->dst_mac);
7579 ether_addr_copy(cld->inner_mac, filter->src_mac);
7581 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
7584 if (filter->n_proto == ETH_P_IPV6) {
7585 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
7586 for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
7588 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
7589 ipa = cpu_to_le32(ipa);
7590 memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
7593 ipa = be32_to_cpu(filter->dst_ipv4);
7594 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
7597 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
7599 /* tenant_id is not supported by FW now; once the support is enabled,
7600 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
7602 if (filter->tenant_id)
7607 * i40e_add_del_cloud_filter - Add/del cloud filter
7608 * @vsi: pointer to VSI
7609 * @filter: cloud filter rule
7610 * @add: if true, add, if false, delete
7612 * Add or delete a cloud filter for a specific flow spec.
7613 * Returns 0 if the filter was successfully added.
7615 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
7616 struct i40e_cloud_filter *filter, bool add)
7618 struct i40e_aqc_cloud_filters_element_data cld_filter;
7619 struct i40e_pf *pf = vsi->back;
7621 static const u16 flag_table[128] = {
7622 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
7623 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
7624 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
7625 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
7626 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
7627 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
7628 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
7629 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
7630 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
7631 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
7632 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
7633 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
7634 [I40E_CLOUD_FILTER_FLAGS_IIP] =
7635 I40E_AQC_ADD_CLOUD_FILTER_IIP,
7638 if (filter->flags >= ARRAY_SIZE(flag_table))
7639 return I40E_ERR_CONFIG;
7641 /* copy element needed to add cloud filter from filter */
7642 i40e_set_cld_element(filter, &cld_filter);
7644 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
7645 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
7646 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
7648 if (filter->n_proto == ETH_P_IPV6)
7649 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7650 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7652 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7653 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7656 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
7659 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
7662 dev_dbg(&pf->pdev->dev,
7663 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
7664 add ? "add" : "delete", filter->dst_port, ret,
7665 pf->hw.aq.asq_last_status);
7667 dev_info(&pf->pdev->dev,
7668 "%s cloud filter for VSI: %d\n",
7669 add ? "Added" : "Deleted", filter->seid);
7674 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
7675 * @vsi: pointer to VSI
7676 * @filter: cloud filter rule
7677 * @add: if true, add, if false, delete
7679 * Add or delete a cloud filter for a specific flow spec using big buffer.
7680 * Returns 0 if the filter was successfully added.
7682 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
7683 struct i40e_cloud_filter *filter,
7686 struct i40e_aqc_cloud_filters_element_bb cld_filter;
7687 struct i40e_pf *pf = vsi->back;
7690 /* Filters with both valid (src and dst) mac_addrs are not supported */
7691 if ((is_valid_ether_addr(filter->dst_mac) &&
7692 is_valid_ether_addr(filter->src_mac)) ||
7693 (is_multicast_ether_addr(filter->dst_mac) &&
7694 is_multicast_ether_addr(filter->src_mac)))
7697 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
7698 * ports are not supported via big buffer now.
7700 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
7703 /* adding filter using src_port/src_ip is not supported at this stage */
7704 if (filter->src_port || filter->src_ipv4 ||
7705 !ipv6_addr_any(&filter->ip.v6.src_ip6))
7708 /* copy element needed to add cloud filter from filter */
7709 i40e_set_cld_element(filter, &cld_filter.element);
7711 if (is_valid_ether_addr(filter->dst_mac) ||
7712 is_valid_ether_addr(filter->src_mac) ||
7713 is_multicast_ether_addr(filter->dst_mac) ||
7714 is_multicast_ether_addr(filter->src_mac)) {
7715 /* MAC + IP : unsupported mode */
7716 if (filter->dst_ipv4)
7719 /* since we have already validated that the L4 port is non-zero
7720 * before we get here, start with the respective "flags" value
7721 * and update it depending on whether a vlan is present or not
7723 cld_filter.element.flags =
7724 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
7726 if (filter->vlan_id) {
7727 cld_filter.element.flags =
7728 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
7731 } else if (filter->dst_ipv4 ||
7732 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
7733 cld_filter.element.flags =
7734 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
7735 if (filter->n_proto == ETH_P_IPV6)
7736 cld_filter.element.flags |=
7737 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7739 cld_filter.element.flags |=
7740 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7742 dev_err(&pf->pdev->dev,
7743 "either mac or ip has to be valid for cloud filter\n");
7747 /* Now copy the L4 port into bytes 6..7 of the general fields */
7748 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
7749 be16_to_cpu(filter->dst_port);
7752 /* Validate current device switch mode, change if necessary */
7753 ret = i40e_validate_and_set_switch_mode(vsi);
7755 dev_err(&pf->pdev->dev,
7756 "failed to set switch mode, ret %d\n",
7761 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
7764 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
7769 dev_dbg(&pf->pdev->dev,
7770 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
7771 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
7773 dev_info(&pf->pdev->dev,
7774 "%s cloud filter for VSI: %d, L4 port: %d\n",
7775 add ? "add" : "delete", filter->seid,
7776 ntohs(filter->dst_port));
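/* Note on path selection: callers such as i40e_configure_clsflower()
 * below use this big-buffer variant only when filter->dst_port is set,
 * since the big-buffer format carries the L4 port in general_fields
 * word 0x16; all other cloud filters take i40e_add_del_cloud_filter()
 * above.
 */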
7781 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
7782 * @vsi: Pointer to VSI
7783 * @cls_flower: Pointer to struct flow_cls_offload
7784 * @filter: Pointer to cloud filter structure
7787 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
7788 struct flow_cls_offload *f,
7789 struct i40e_cloud_filter *filter)
7791 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
7792 struct flow_dissector *dissector = rule->match.dissector;
7793 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
7794 struct i40e_pf *pf = vsi->back;
7797 if (dissector->used_keys &
7798 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7799 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7800 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7801 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7802 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7803 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7804 BIT(FLOW_DISSECTOR_KEY_PORTS) |
7805 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
7806 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
7807 dissector->used_keys);
7811 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
7812 struct flow_match_enc_keyid match;
7814 flow_rule_match_enc_keyid(rule, &match);
7815 if (match.mask->keyid != 0)
7816 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
7818 filter->tenant_id = be32_to_cpu(match.key->keyid);
7821 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
7822 struct flow_match_basic match;
7824 flow_rule_match_basic(rule, &match);
7825 n_proto_key = ntohs(match.key->n_proto);
7826 n_proto_mask = ntohs(match.mask->n_proto);
7828 if (n_proto_key == ETH_P_ALL) {
7832 filter->n_proto = n_proto_key & n_proto_mask;
7833 filter->ip_proto = match.key->ip_proto;
7836 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7837 struct flow_match_eth_addrs match;
7839 flow_rule_match_eth_addrs(rule, &match);
7841 /* use is_broadcast and is_zero to check for all 0xff or all 0 */
7842 if (!is_zero_ether_addr(match.mask->dst)) {
7843 if (is_broadcast_ether_addr(match.mask->dst)) {
7844 field_flags |= I40E_CLOUD_FIELD_OMAC;
7846 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
7848 return I40E_ERR_CONFIG;
7852 if (!is_zero_ether_addr(match.mask->src)) {
7853 if (is_broadcast_ether_addr(match.mask->src)) {
7854 field_flags |= I40E_CLOUD_FIELD_IMAC;
7856 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
7858 return I40E_ERR_CONFIG;
7861 ether_addr_copy(filter->dst_mac, match.key->dst);
7862 ether_addr_copy(filter->src_mac, match.key->src);
7865 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
7866 struct flow_match_vlan match;
7868 flow_rule_match_vlan(rule, &match);
7869 if (match.mask->vlan_id) {
7870 if (match.mask->vlan_id == VLAN_VID_MASK) {
7871 field_flags |= I40E_CLOUD_FIELD_IVLAN;
7874 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
7875 match.mask->vlan_id);
7876 return I40E_ERR_CONFIG;
7880 filter->vlan_id = cpu_to_be16(match.key->vlan_id);
7883 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
7884 struct flow_match_control match;
7886 flow_rule_match_control(rule, &match);
7887 addr_type = match.key->addr_type;
7890 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7891 struct flow_match_ipv4_addrs match;
7893 flow_rule_match_ipv4_addrs(rule, &match);
7894 if (match.mask->dst) {
7895 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
7896 field_flags |= I40E_CLOUD_FIELD_IIP;
7898 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
7900 return I40E_ERR_CONFIG;
7904 if (match.mask->src) {
7905 if (match.mask->src == cpu_to_be32(0xffffffff)) {
7906 field_flags |= I40E_CLOUD_FIELD_IIP;
7908 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
7910 return I40E_ERR_CONFIG;
7914 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
7915 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
7916 return I40E_ERR_CONFIG;
7918 filter->dst_ipv4 = match.key->dst;
7919 filter->src_ipv4 = match.key->src;
7922 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7923 struct flow_match_ipv6_addrs match;
7925 flow_rule_match_ipv6_addrs(rule, &match);
7927 /* src and dest IPV6 address should not be LOOPBACK
7928 * (0:0:0:0:0:0:0:1), which can be represented as ::1
7930 if (ipv6_addr_loopback(&match.key->dst) ||
7931 ipv6_addr_loopback(&match.key->src)) {
7932 dev_err(&pf->pdev->dev,
7933 "Bad ipv6, addr is LOOPBACK\n");
7934 return I40E_ERR_CONFIG;
7936 if (!ipv6_addr_any(&match.mask->dst) ||
7937 !ipv6_addr_any(&match.mask->src))
7938 field_flags |= I40E_CLOUD_FIELD_IIP;
7940 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
7941 sizeof(filter->src_ipv6));
7942 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
7943 sizeof(filter->dst_ipv6));
7946 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
7947 struct flow_match_ports match;
7949 flow_rule_match_ports(rule, &match);
7950 if (match.mask->src) {
7951 if (match.mask->src == cpu_to_be16(0xffff)) {
7952 field_flags |= I40E_CLOUD_FIELD_IIP;
7954 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
7955 be16_to_cpu(match.mask->src));
7956 return I40E_ERR_CONFIG;
7960 if (match.mask->dst) {
7961 if (match.mask->dst == cpu_to_be16(0xffff)) {
7962 field_flags |= I40E_CLOUD_FIELD_IIP;
7964 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
7965 be16_to_cpu(match.mask->dst));
7966 return I40E_ERR_CONFIG;
7970 filter->dst_port = match.key->dst;
7971 filter->src_port = match.key->src;
7973 switch (filter->ip_proto) {
7978 dev_err(&pf->pdev->dev,
7979 "Only UDP and TCP transport are supported\n");
7983 filter->flags = field_flags;
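/* Illustrative tc-flower rule exercising the parser above (a sketch;
 * device name, address and port are placeholders):
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 ingress protocol ip flower \
 *      dst_ip 192.168.1.10/32 ip_proto tcp dst_port 80 \
 *      skip_sw hw_tc 1
 *
 * This matches the CONTROL/IPV4_ADDRS/PORTS dissector keys handled
 * above and is steered to TC1 by i40e_handle_tclass() below.
 */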
7988 * i40e_handle_tclass: Forward to a traffic class on the device
7989 * @vsi: Pointer to VSI
7990 * @tc: traffic class index on the device
7991 * @filter: Pointer to cloud filter structure
7994 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
7995 struct i40e_cloud_filter *filter)
7997 struct i40e_channel *ch, *ch_tmp;
7999 /* direct to a traffic class on the same device */
8001 filter->seid = vsi->seid;
8003 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
8004 if (!filter->dst_port) {
8005 dev_err(&vsi->back->pdev->dev,
8006 "Specify destination port to direct to traffic class that is not default\n");
8009 if (list_empty(&vsi->ch_list))
8011 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
8013 if (ch->seid == vsi->tc_seid_map[tc])
8014 filter->seid = ch->seid;
8018 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
8023 * i40e_configure_clsflower - Configure tc flower filters
8024 * @vsi: Pointer to VSI
8025 * @cls_flower: Pointer to struct flow_cls_offload
8028 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
8029 struct flow_cls_offload *cls_flower)
8031 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
8032 struct i40e_cloud_filter *filter = NULL;
8033 struct i40e_pf *pf = vsi->back;
8037 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
8041 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
8042 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
8045 if (pf->fdir_pf_active_filters ||
8046 (!hlist_empty(&pf->fdir_filter_list))) {
8047 dev_err(&vsi->back->pdev->dev,
8048 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
8052 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
8053 dev_err(&vsi->back->pdev->dev,
8054 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
8055 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8056 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8059 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
8063 filter->cookie = cls_flower->cookie;
8065 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
8069 err = i40e_handle_tclass(vsi, tc, filter);
8073 /* Add cloud filter */
8074 if (filter->dst_port)
8075 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
8077 err = i40e_add_del_cloud_filter(vsi, filter, true);
8080 dev_err(&pf->pdev->dev,
8081 "Failed to add cloud filter, err %s\n",
8082 i40e_stat_str(&pf->hw, err));
8086 /* add filter to the ordered list */
8087 INIT_HLIST_NODE(&filter->cloud_node);
8089 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
8091 pf->num_cloud_filters++;
8100 * i40e_find_cloud_filter - Find the cloud filter in the list
8101 * @vsi: Pointer to VSI
8102 * @cookie: filter specific cookie
8105 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
8106 unsigned long *cookie)
8108 struct i40e_cloud_filter *filter = NULL;
8109 struct hlist_node *node2;
8111 hlist_for_each_entry_safe(filter, node2,
8112 &vsi->back->cloud_filter_list, cloud_node)
8113 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
8119 * i40e_delete_clsflower - Remove tc flower filters
8120 * @vsi: Pointer to VSI
8121 * @cls_flower: Pointer to struct flow_cls_offload
8124 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
8125 struct flow_cls_offload *cls_flower)
8127 struct i40e_cloud_filter *filter = NULL;
8128 struct i40e_pf *pf = vsi->back;
8131 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
8136 hash_del(&filter->cloud_node);
8138 if (filter->dst_port)
8139 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
8141 err = i40e_add_del_cloud_filter(vsi, filter, false);
8145 dev_err(&pf->pdev->dev,
8146 "Failed to delete cloud filter, err %s\n",
8147 i40e_stat_str(&pf->hw, err));
8148 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
8151 pf->num_cloud_filters--;
8152 if (!pf->num_cloud_filters)
8153 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8154 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8155 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8156 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8157 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8163 * i40e_setup_tc_cls_flower - flower classifier offloads
8164 * @netdev: net device to configure
8165 * @type_data: offload data
8167 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
8168 struct flow_cls_offload *cls_flower)
8170 struct i40e_vsi *vsi = np->vsi;
8172 switch (cls_flower->command) {
8173 case FLOW_CLS_REPLACE:
8174 return i40e_configure_clsflower(vsi, cls_flower);
8175 case FLOW_CLS_DESTROY:
8176 return i40e_delete_clsflower(vsi, cls_flower);
8177 case FLOW_CLS_STATS:
8184 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8187 struct i40e_netdev_priv *np = cb_priv;
8189 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
8193 case TC_SETUP_CLSFLOWER:
8194 return i40e_setup_tc_cls_flower(np, type_data);
8201 static LIST_HEAD(i40e_block_cb_list);
8203 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8206 struct i40e_netdev_priv *np = netdev_priv(netdev);
8209 case TC_SETUP_QDISC_MQPRIO:
8210 return i40e_setup_tc(netdev, type_data);
8211 case TC_SETUP_BLOCK:
8212 return flow_block_cb_setup_simple(type_data,
8213 &i40e_block_cb_list,
8214 i40e_setup_tc_block_cb,
8222 * i40e_open - Called when a network interface is made active
8223 * @netdev: network interface device structure
8225 * The open entry point is called when a network interface is made
8226 * active by the system (IFF_UP). At this point all resources needed
8227 * for transmit and receive operations are allocated, the interrupt
8228 * handler is registered with the OS, the netdev watchdog subtask is
8229 * enabled, and the stack is notified that the interface is ready.
8231 * Returns 0 on success, negative value on failure
8233 int i40e_open(struct net_device *netdev)
8235 struct i40e_netdev_priv *np = netdev_priv(netdev);
8236 struct i40e_vsi *vsi = np->vsi;
8237 struct i40e_pf *pf = vsi->back;
8240 /* disallow open during test or if eeprom is broken */
8241 if (test_bit(__I40E_TESTING, pf->state) ||
8242 test_bit(__I40E_BAD_EEPROM, pf->state))
8245 netif_carrier_off(netdev);
8247 if (i40e_force_link_state(pf, true))
8250 err = i40e_vsi_open(vsi);
8254 /* configure global TSO hardware offload settings */
8255 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
8256 TCP_FLAG_FIN) >> 16);
8257 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
8259 TCP_FLAG_CWR) >> 16);
8260 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
8262 udp_tunnel_get_rx_info(netdev);
8269 * @vsi: the VSI to open
8271 * Finish initialization of the VSI.
8273 * Returns 0 on success, negative value on failure
8275 * Note: expects to be called while under rtnl_lock()
8277 int i40e_vsi_open(struct i40e_vsi *vsi)
8279 struct i40e_pf *pf = vsi->back;
8280 char int_name[I40E_INT_NAME_STR_LEN];
8283 /* allocate descriptors */
8284 err = i40e_vsi_setup_tx_resources(vsi);
8287 err = i40e_vsi_setup_rx_resources(vsi);
8291 err = i40e_vsi_configure(vsi);
8296 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
8297 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
8298 err = i40e_vsi_request_irq(vsi, int_name);
8302 /* Notify the stack of the actual queue counts. */
8303 err = netif_set_real_num_tx_queues(vsi->netdev,
8304 vsi->num_queue_pairs);
8306 goto err_set_queues;
8308 err = netif_set_real_num_rx_queues(vsi->netdev,
8309 vsi->num_queue_pairs);
8311 goto err_set_queues;
8313 } else if (vsi->type == I40E_VSI_FDIR) {
8314 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
8315 dev_driver_string(&pf->pdev->dev),
8316 dev_name(&pf->pdev->dev));
8317 err = i40e_vsi_request_irq(vsi, int_name);
8324 err = i40e_up_complete(vsi);
8326 goto err_up_complete;
8333 i40e_vsi_free_irq(vsi);
8335 i40e_vsi_free_rx_resources(vsi);
8337 i40e_vsi_free_tx_resources(vsi);
8338 if (vsi == pf->vsi[pf->lan_vsi])
8339 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
8345 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
8346 * @pf: Pointer to PF
8348 * This function destroys the hlist where all the Flow Director
8349 * filters were saved.
8351 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
8353 struct i40e_fdir_filter *filter;
8354 struct i40e_flex_pit *pit_entry, *tmp;
8355 struct hlist_node *node2;
8357 hlist_for_each_entry_safe(filter, node2,
8358 &pf->fdir_filter_list, fdir_node) {
8359 hlist_del(&filter->fdir_node);
8363 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
8364 list_del(&pit_entry->list);
8367 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
8369 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
8370 list_del(&pit_entry->list);
8373 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
8375 pf->fdir_pf_active_filters = 0;
8376 pf->fd_tcp4_filter_cnt = 0;
8377 pf->fd_udp4_filter_cnt = 0;
8378 pf->fd_sctp4_filter_cnt = 0;
8379 pf->fd_ip4_filter_cnt = 0;
8381 /* Reprogram the default input set for TCP/IPv4 */
8382 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8383 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8384 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8386 /* Reprogram the default input set for UDP/IPv4 */
8387 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
8388 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8389 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8391 /* Reprogram the default input set for SCTP/IPv4 */
8392 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
8393 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8394 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8396 /* Reprogram the default input set for Other/IPv4 */
8397 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
8398 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8400 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
8401 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8405 * i40e_cloud_filter_exit - Cleans up the cloud filters
8406 * @pf: Pointer to PF
8408 * This function destroys the hlist where all the cloud filters were saved.
8411 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
8413 struct i40e_cloud_filter *cfilter;
8414 struct hlist_node *node;
8416 hlist_for_each_entry_safe(cfilter, node,
8417 &pf->cloud_filter_list, cloud_node) {
8418 hlist_del(&cfilter->cloud_node);
8421 pf->num_cloud_filters = 0;
8423 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8424 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8425 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8426 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8427 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8432 * i40e_close - Disables a network interface
8433 * @netdev: network interface device structure
8435 * The close entry point is called when an interface is de-activated
8436 * by the OS. The hardware is still under the driver's control, but
8437 * this netdev interface is disabled.
8439 * Returns 0, this is not allowed to fail
8441 int i40e_close(struct net_device *netdev)
8443 struct i40e_netdev_priv *np = netdev_priv(netdev);
8444 struct i40e_vsi *vsi = np->vsi;
8446 i40e_vsi_close(vsi);
8452 * i40e_do_reset - Start a PF or Core Reset sequence
8453 * @pf: board private structure
8454 * @reset_flags: which reset is requested
8455 * @lock_acquired: indicates whether or not the lock has been acquired
8456 * before this function was called.
8458 * The essential difference in resets is that the PF Reset
8459 * doesn't clear the packet buffers, doesn't reset the PE
8460 * firmware, and doesn't bother the other PFs on the chip.
8462 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
8466 WARN_ON(in_interrupt());
8469 /* do the biggest reset indicated */
8470 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
8472 /* Request a Global Reset
8474 * This will start the chip's countdown to the actual full
8475 * chip reset event, and a warning interrupt to be sent
8476 * to all PFs, including the requestor. Our handler
8477 * for the warning interrupt will deal with the shutdown
8478 * and recovery of the switch setup.
8480 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
8481 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8482 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
8483 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8485 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
8487 /* Request a Core Reset
8489 * Same as Global Reset, except does *not* include the MAC/PHY
8491 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
8492 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8493 val |= I40E_GLGEN_RTRIG_CORER_MASK;
8494 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8495 i40e_flush(&pf->hw);
8497 } else if (reset_flags & I40E_PF_RESET_FLAG) {
8499 /* Request a PF Reset
8501 * Resets only the PF-specific registers
8503 * This goes directly to the tear-down and rebuild of
8504 * the switch, since we need to do all the recovery as
8505 * for the Core Reset.
8507 dev_dbg(&pf->pdev->dev, "PFR requested\n");
8508 i40e_handle_reset_warning(pf, lock_acquired);
8510 dev_info(&pf->pdev->dev,
8511 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
8512 "FW LLDP is disabled\n" :
8513 "FW LLDP is enabled\n");
8515 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
8518 /* Find the VSI(s) that requested a re-init */
8519 dev_info(&pf->pdev->dev,
8520 "VSI reinit requested\n");
8521 for (v = 0; v < pf->num_alloc_vsi; v++) {
8522 struct i40e_vsi *vsi = pf->vsi[v];
8525 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
8527 i40e_vsi_reinit_locked(pf->vsi[v]);
8529 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
8532 /* Find the VSI(s) that needs to be brought down */
8533 dev_info(&pf->pdev->dev, "VSI down requested\n");
8534 for (v = 0; v < pf->num_alloc_vsi; v++) {
8535 struct i40e_vsi *vsi = pf->vsi[v];
8538 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
8540 set_bit(__I40E_VSI_DOWN, vsi->state);
8545 dev_info(&pf->pdev->dev,
8546 "bad reset request 0x%08x\n", reset_flags);
8550 #ifdef CONFIG_I40E_DCB
8552 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
8553 * @pf: board private structure
8554 * @old_cfg: current DCB config
8555 * @new_cfg: new DCB config
8557 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
8558 struct i40e_dcbx_config *old_cfg,
8559 struct i40e_dcbx_config *new_cfg)
8561 bool need_reconfig = false;
8563 /* Check if ETS configuration has changed */
8564 if (memcmp(&new_cfg->etscfg,
8566 sizeof(new_cfg->etscfg))) {
8567 /* If Priority Table has changed reconfig is needed */
8568 if (memcmp(&new_cfg->etscfg.prioritytable,
8569 &old_cfg->etscfg.prioritytable,
8570 sizeof(new_cfg->etscfg.prioritytable))) {
8571 need_reconfig = true;
8572 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
8575 if (memcmp(&new_cfg->etscfg.tcbwtable,
8576 &old_cfg->etscfg.tcbwtable,
8577 sizeof(new_cfg->etscfg.tcbwtable)))
8578 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
8580 if (memcmp(&new_cfg->etscfg.tsatable,
8581 &old_cfg->etscfg.tsatable,
8582 sizeof(new_cfg->etscfg.tsatable)))
8583 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
8586 /* Check if PFC configuration has changed */
8587 if (memcmp(&new_cfg->pfc,
8589 sizeof(new_cfg->pfc))) {
8590 need_reconfig = true;
8591 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
8594 /* Check if APP Table has changed */
8595 if (memcmp(&new_cfg->app,
8597 sizeof(new_cfg->app))) {
8598 need_reconfig = true;
8599 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
8602 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
8603 return need_reconfig;
8607 * i40e_handle_lldp_event - Handle LLDP Change MIB event
8608 * @pf: board private structure
8609 * @e: event info posted on ARQ
8611 static int i40e_handle_lldp_event(struct i40e_pf *pf,
8612 struct i40e_arq_event_info *e)
8614 struct i40e_aqc_lldp_get_mib *mib =
8615 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
8616 struct i40e_hw *hw = &pf->hw;
8617 struct i40e_dcbx_config tmp_dcbx_cfg;
8618 bool need_reconfig = false;
8619 int ret = 0;
8620 u8 type;
8622 /* Not DCB capable or capability disabled */
8623 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
8624 return ret;
8626 /* Ignore if event is not for Nearest Bridge */
8627 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
8628 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
8629 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
8630 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
8633 /* Check MIB Type and return if event for Remote MIB update */
8634 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
8635 dev_dbg(&pf->pdev->dev,
8636 "LLDP event mib type %s\n", type ? "remote" : "local");
8637 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
8638 /* Update the remote cached instance and return */
8639 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
8640 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
8641 &hw->remote_dcbx_config);
8642 goto exit;
8643 }
8645 /* Store the old configuration */
8646 tmp_dcbx_cfg = hw->local_dcbx_config;
8648 /* Reset the old DCBx configuration data */
8649 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
8650 /* Get updated DCBX data from firmware */
8651 ret = i40e_get_dcb_config(&pf->hw);
8652 if (ret) {
8653 dev_info(&pf->pdev->dev,
8654 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
8655 i40e_stat_str(&pf->hw, ret),
8656 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8657 goto exit;
8658 }
8660 /* No change detected in DCBX configs */
8661 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
8662 sizeof(tmp_dcbx_cfg))) {
8663 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
8664 goto exit;
8665 }
8667 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
8668 &hw->local_dcbx_config);
8670 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
8672 if (!need_reconfig)
8673 goto exit;
8675 /* Enable DCB tagging only when more than one TC */
8676 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
8677 pf->flags |= I40E_FLAG_DCB_ENABLED;
8678 else
8679 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8681 set_bit(__I40E_PORT_SUSPENDED, pf->state);
8682 /* Reconfiguration needed quiesce all VSIs */
8683 i40e_pf_quiesce_all_vsi(pf);
8685 /* Changes in configuration update VEB/VSI */
8686 i40e_dcb_reconfigure(pf);
8688 ret = i40e_resume_port_tx(pf);
8690 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
8691 /* In case of error no point in resuming VSIs */
8692 if (ret)
8693 goto exit;
8695 /* Wait for the PF's queues to be disabled */
8696 ret = i40e_pf_wait_queues_disabled(pf);
8697 if (ret) {
8698 /* Schedule PF reset to recover */
8699 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8700 i40e_service_event_schedule(pf);
8701 } else {
8702 i40e_pf_unquiesce_all_vsi(pf);
8703 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
8704 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
8705 }
8707 exit:
8708 return ret;
8710 #endif /* CONFIG_I40E_DCB */
8713 * i40e_do_reset_safe - Protected reset path for userland calls.
8714 * @pf: board private structure
8715 * @reset_flags: which reset is requested
8718 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
8720 rtnl_lock();
8721 i40e_do_reset(pf, reset_flags, true);
8722 rtnl_unlock();
8726 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
8727 * @pf: board private structure
8728 * @e: event info posted on ARQ
8730 * Handler for LAN Queue Overflow Event generated by the firmware for PF
8733 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
8734 struct i40e_arq_event_info *e)
8736 struct i40e_aqc_lan_overflow *data =
8737 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
8738 u32 queue = le32_to_cpu(data->prtdcb_rupto);
8739 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
8740 struct i40e_hw *hw = &pf->hw;
8741 struct i40e_vf *vf;
8742 u16 vf_id;
8744 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
8745 queue, qtx_ctl);
8747 /* Queue belongs to VF, find the VF and issue VF reset */
8748 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
8749 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
8750 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
8751 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
8752 vf_id -= hw->func_caps.vf_base_id;
8753 vf = &pf->vf[vf_id];
8754 i40e_vc_notify_vf_reset(vf);
8755 /* Allow VF to process pending reset notification */
8756 msleep(20);
8757 i40e_reset_vf(vf, false);
8762 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
8763 * @pf: board private structure
8765 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
8769 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8770 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
8771 return fcnt_prog;
8775 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
8776 * @pf: board private structure
8778 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
8782 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
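/* FDSTAT reports the guaranteed-filter count and the best-effort
 * count in two bitfields of the same register; the total programmed
 * for this PF is the sum extracted below.
 */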
8783 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
8784 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
8785 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
8786 return fcnt_prog;
8790 * i40e_get_global_fd_count - Get total FD filters programmed on device
8791 * @pf: board private structure
8793 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
8797 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
8798 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
8799 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
8800 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
8801 return fcnt_prog;
8805 * i40e_reenable_fdir_sb - Restore FDir SB capability
8806 * @pf: board private structure
8808 static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
8810 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
8811 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
8812 (I40E_DEBUG_FD & pf->hw.debug_mask))
8813 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
8817 * i40e_reenable_fdir_atr - Restore FDir ATR capability
8818 * @pf: board private structure
8820 static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
8822 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
8823 /* ATR uses the same filtering logic as SB rules. It only
8824 * functions properly if the input set mask is at the default
8825 * settings. It is safe to restore the default input set
8826 * because there are no active TCPv4 filter rules.
8828 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8829 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8830 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8832 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8833 (I40E_DEBUG_FD & pf->hw.debug_mask))
8834 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
8839 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
8840 * @pf: board private structure
8841 * @filter: FDir filter to remove
8843 static void i40e_delete_invalid_filter(struct i40e_pf *pf,
8844 struct i40e_fdir_filter *filter)
8846 /* Update counters */
8847 pf->fdir_pf_active_filters--;
8850 switch (filter->flow_type) {
8851 case TCP_V4_FLOW:
8852 pf->fd_tcp4_filter_cnt--;
8853 break;
8854 case UDP_V4_FLOW:
8855 pf->fd_udp4_filter_cnt--;
8856 break;
8857 case SCTP_V4_FLOW:
8858 pf->fd_sctp4_filter_cnt--;
8859 break;
8860 case IP_USER_FLOW:
8861 switch (filter->ip4_proto) {
8862 case IPPROTO_TCP:
8863 pf->fd_tcp4_filter_cnt--;
8864 break;
8865 case IPPROTO_UDP:
8866 pf->fd_udp4_filter_cnt--;
8867 break;
8868 case IPPROTO_SCTP:
8869 pf->fd_sctp4_filter_cnt--;
8870 break;
8871 case IPPROTO_IP:
8872 pf->fd_ip4_filter_cnt--;
8873 break;
8874 }
8875 break;
8876 }
8878 /* Remove the filter from the list and free memory */
8879 hlist_del(&filter->fdir_node);
8880 kfree(filter);
8884 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
8885 * @pf: board private structure
8887 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
8889 struct i40e_fdir_filter *filter;
8890 u32 fcnt_prog, fcnt_avail;
8891 struct hlist_node *node;
8893 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8894 return;
8896 /* Check if we have enough room to re-enable FDir SB capability. */
8897 fcnt_prog = i40e_get_global_fd_count(pf);
8898 fcnt_avail = pf->fdir_pf_filter_count;
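/* SB is only restored once programmed filters have dropped well
 * below capacity (I40E_FDIR_BUFFER_HEAD_ROOM of slack), when no add
 * errors are outstanding, or when the ATR count has receded.
 */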
8899 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
8900 (pf->fd_add_err == 0) ||
8901 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
8902 i40e_reenable_fdir_sb(pf);
8904 /* We should wait for even more space before re-enabling ATR.
8905 * Additionally, we cannot enable ATR as long as we still have TCP SB
8906 * rules.
8907 */
8908 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
8909 (pf->fd_tcp4_filter_cnt == 0))
8910 i40e_reenable_fdir_atr(pf);
8912 /* if hw had a problem adding a filter, delete it */
8913 if (pf->fd_inv > 0) {
8914 hlist_for_each_entry_safe(filter, node,
8915 &pf->fdir_filter_list, fdir_node)
8916 if (filter->fd_id == pf->fd_inv)
8917 i40e_delete_invalid_filter(pf, filter);
8921 #define I40E_MIN_FD_FLUSH_INTERVAL 10
8922 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
8924 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
8925 * @pf: board private structure
8927 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
8929 unsigned long min_flush_time;
8930 int flush_wait_retry = 50;
8931 bool disable_atr = false;
8932 int fd_room;
8933 int reg;
8935 if (!time_after(jiffies, pf->fd_flush_timestamp +
8936 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
8937 return;
8939 /* If the flush is happening too quick and we have mostly SB rules we
8940 * should not re-enable ATR for some time.
8942 min_flush_time = pf->fd_flush_timestamp +
8943 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
8944 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
8946 if (!(time_after(jiffies, min_flush_time)) &&
8947 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
8948 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8949 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
8950 disable_atr = true;
8951 }
8953 pf->fd_flush_timestamp = jiffies;
8954 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8955 /* flush all filters */
8956 wr32(&pf->hw, I40E_PFQF_CTL_1,
8957 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
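/* Setting CLEARFDTABLE asks the hardware to drain the whole filter
 * table; the same bit is polled below and reads back as zero once
 * the flush has completed.
 */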
8958 i40e_flush(&pf->hw);
8959 pf->fd_flush_cnt++;
8960 pf->fd_add_err = 0;
8961 do {
8962 /* Check FD flush status every 5-6msec */
8963 usleep_range(5000, 6000);
8964 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
8965 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
8966 break;
8967 } while (flush_wait_retry--);
8968 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
8969 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
8970 } else {
8971 /* replay sideband filters */
8972 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
8973 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
8974 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8975 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
8976 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8977 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
8982 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
8983 * @pf: board private structure
8985 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
8987 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
8991 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
8992 * @pf: board private structure
8994 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
8997 /* if interface is down do nothing */
8998 if (test_bit(__I40E_DOWN, pf->state))
8999 return;
9001 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9002 i40e_fdir_flush_and_replay(pf);
9004 i40e_fdir_check_and_reenable(pf);
9009 * i40e_vsi_link_event - notify VSI of a link event
9010 * @vsi: vsi to be notified
9011 * @link_up: link up or down
9013 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
9015 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
9016 return;
9018 switch (vsi->type) {
9019 case I40E_VSI_MAIN:
9020 if (!vsi->netdev || !vsi->netdev_registered)
9021 break;
9023 if (link_up) {
9024 netif_carrier_on(vsi->netdev);
9025 netif_tx_wake_all_queues(vsi->netdev);
9026 } else {
9027 netif_carrier_off(vsi->netdev);
9028 netif_tx_stop_all_queues(vsi->netdev);
9029 }
9030 break;
9032 case I40E_VSI_SRIOV:
9033 case I40E_VSI_VMDQ2:
9035 case I40E_VSI_IWARP:
9036 case I40E_VSI_MIRROR:
9037 default:
9038 /* there is no notification for other VSIs */
9039 break;
9040 }
9044 * i40e_veb_link_event - notify elements on the veb of a link event
9045 * @veb: veb to be notified
9046 * @link_up: link up or down
9048 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
9053 if (!veb || !veb->pf)
9054 return;
9055 pf = veb->pf;
9057 /* depth first... */
9058 for (i = 0; i < I40E_MAX_VEB; i++)
9059 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
9060 i40e_veb_link_event(pf->veb[i], link_up);
9062 /* ... now the local VSIs */
9063 for (i = 0; i < pf->num_alloc_vsi; i++)
9064 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
9065 i40e_vsi_link_event(pf->vsi[i], link_up);
9069 * i40e_link_event - Update netif_carrier status
9070 * @pf: board private structure
9072 static void i40e_link_event(struct i40e_pf *pf)
9074 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9075 u8 new_link_speed, old_link_speed;
9076 i40e_status status;
9077 bool new_link, old_link;
9079 /* set this to force the get_link_status call to refresh state */
9080 pf->hw.phy.get_link_info = true;
9081 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
9082 status = i40e_get_link_status(&pf->hw, &new_link);
9084 /* On success, disable temp link polling */
9085 if (status == I40E_SUCCESS) {
9086 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9087 } else {
9088 /* Enable link polling temporarily until i40e_get_link_status
9089 * returns I40E_SUCCESS
9091 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9092 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
9093 status);
9094 return;
9095 }
9097 old_link_speed = pf->hw.phy.link_info_old.link_speed;
9098 new_link_speed = pf->hw.phy.link_info.link_speed;
9100 if (new_link == old_link &&
9101 new_link_speed == old_link_speed &&
9102 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
9103 new_link == netif_carrier_ok(vsi->netdev)))
9104 return;
9106 i40e_print_link_message(vsi, new_link);
9108 /* Notify the base of the switch tree connected to
9109 * the link. Floating VEBs are not notified.
9111 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
9112 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
9113 else
9114 i40e_vsi_link_event(vsi, new_link);
9116 if (pf->vf)
9117 i40e_vc_notify_link_state(pf);
9119 if (pf->flags & I40E_FLAG_PTP)
9120 i40e_ptp_set_increment(pf);
9124 * i40e_watchdog_subtask - periodic checks not using event driven response
9125 * @pf: board private structure
9127 static void i40e_watchdog_subtask(struct i40e_pf *pf)
9131 /* if interface is down do nothing */
9132 if (test_bit(__I40E_DOWN, pf->state) ||
9133 test_bit(__I40E_CONFIG_BUSY, pf->state))
9134 return;
9136 /* make sure we don't do these things too often */
9137 if (time_before(jiffies, (pf->service_timer_previous +
9138 pf->service_timer_period)))
9139 return;
9140 pf->service_timer_previous = jiffies;
9142 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
9143 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
9144 i40e_link_event(pf);
9146 /* Update the stats for active netdevs so the network stack
9147 * can look at updated numbers whenever it cares to
9149 for (i = 0; i < pf->num_alloc_vsi; i++)
9150 if (pf->vsi[i] && pf->vsi[i]->netdev)
9151 i40e_update_stats(pf->vsi[i]);
9153 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
9154 /* Update the stats for the active switching components */
9155 for (i = 0; i < I40E_MAX_VEB; i++)
9156 if (pf->veb[i])
9157 i40e_update_veb_stats(pf->veb[i]);
9160 i40e_ptp_rx_hang(pf);
9161 i40e_ptp_tx_hang(pf);
9165 * i40e_reset_subtask - Set up for resetting the device and driver
9166 * @pf: board private structure
9168 static void i40e_reset_subtask(struct i40e_pf *pf)
9170 u32 reset_flags = 0;
9172 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
9173 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
9174 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
9176 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
9177 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
9178 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9180 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
9181 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
9182 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
9184 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
9185 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
9186 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
9188 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
9189 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
9190 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
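/* Each request bit is latched into reset_flags and cleared
 * atomically, so a request that arrives while the reset below runs
 * simply sets the bit again for the next service-task pass.
 */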
9193 /* If there's a recovery already waiting, it takes
9194 * precedence before starting a new reset sequence.
9196 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
9197 i40e_prep_for_reset(pf, false);
9198 i40e_reset(pf);
9199 i40e_rebuild(pf, false, false);
9202 /* If we're already down or resetting, just bail */
9203 if (reset_flags &&
9204 !test_bit(__I40E_DOWN, pf->state) &&
9205 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
9206 i40e_do_reset(pf, reset_flags, false);
9211 * i40e_handle_link_event - Handle link event
9212 * @pf: board private structure
9213 * @e: event info posted on ARQ
9215 static void i40e_handle_link_event(struct i40e_pf *pf,
9216 struct i40e_arq_event_info *e)
9218 struct i40e_aqc_get_link_status *status =
9219 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
9221 /* Do a new status request to re-enable LSE reporting
9222 * and load new status information into the hw struct
9223 * This completely ignores any state information
9224 * in the ARQ event info, instead choosing to always
9225 * issue the AQ update link status command.
9227 i40e_link_event(pf);
9229 /* Check if module meets thermal requirements */
9230 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
9231 dev_err(&pf->pdev->dev,
9232 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
9233 dev_err(&pf->pdev->dev,
9234 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9235 } else {
9236 /* check for unqualified module, if link is down, suppress
9237 * the message if link was forced to be down.
9239 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
9240 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
9241 (!(status->link_info & I40E_AQ_LINK_UP)) &&
9242 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
9243 dev_err(&pf->pdev->dev,
9244 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
9245 dev_err(&pf->pdev->dev,
9246 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9252 * i40e_clean_adminq_subtask - Clean the AdminQ rings
9253 * @pf: board private structure
9255 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
9257 struct i40e_arq_event_info event;
9258 struct i40e_hw *hw = &pf->hw;
9259 u16 pending, i = 0;
9260 i40e_status ret;
9261 u16 opcode;
9262 u32 oldval;
9263 u32 val;
9265 /* Do not run clean AQ when PF reset fails */
9266 if (test_bit(__I40E_RESET_FAILED, pf->state))
9267 return;
9269 /* check for error indications */
9270 val = rd32(&pf->hw, pf->hw.aq.arq.len);
9271 oldval = val;
9272 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
9273 if (hw->debug_mask & I40E_DEBUG_AQ)
9274 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
9275 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
9277 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
9278 if (hw->debug_mask & I40E_DEBUG_AQ)
9279 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
9280 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
9281 pf->arq_overflows++;
9283 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
9284 if (hw->debug_mask & I40E_DEBUG_AQ)
9285 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
9286 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
9288 if (oldval != val)
9289 wr32(&pf->hw, pf->hw.aq.arq.len, val);
9291 val = rd32(&pf->hw, pf->hw.aq.asq.len);
9292 oldval = val;
9293 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
9294 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9295 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
9296 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
9298 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
9299 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9300 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
9301 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
9303 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
9304 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9305 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
9306 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
9308 if (oldval != val)
9309 wr32(&pf->hw, pf->hw.aq.asq.len, val);
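/* With the error causes acknowledged, drain the receive side of the
 * AdminQ: events are pulled one at a time until the queue is empty
 * or adminq_work_limit events have been processed.
 */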
9311 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
9312 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
9313 if (!event.msg_buf)
9314 return;
9316 do {
9317 ret = i40e_clean_arq_element(hw, &event, &pending);
9318 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
9319 break;
9320 else if (ret) {
9321 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
9322 break;
9323 }
9325 opcode = le16_to_cpu(event.desc.opcode);
9327 switch (opcode) {
9328 case i40e_aqc_opc_get_link_status:
9329 i40e_handle_link_event(pf, &event);
9330 break;
9331 case i40e_aqc_opc_send_msg_to_pf:
9332 ret = i40e_vc_process_vf_msg(pf,
9333 le16_to_cpu(event.desc.retval),
9334 le32_to_cpu(event.desc.cookie_high),
9335 le32_to_cpu(event.desc.cookie_low),
9336 event.msg_buf,
9337 event.msg_len);
9338 break;
9339 case i40e_aqc_opc_lldp_update_mib:
9340 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
9341 #ifdef CONFIG_I40E_DCB
9342 rtnl_lock();
9343 ret = i40e_handle_lldp_event(pf, &event);
9344 rtnl_unlock();
9345 #endif /* CONFIG_I40E_DCB */
9346 break;
9347 case i40e_aqc_opc_event_lan_overflow:
9348 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
9349 i40e_handle_lan_overflow_event(pf, &event);
9350 break;
9351 case i40e_aqc_opc_send_msg_to_peer:
9352 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
9353 break;
9354 case i40e_aqc_opc_nvm_erase:
9355 case i40e_aqc_opc_nvm_update:
9356 case i40e_aqc_opc_oem_post_update:
9357 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
9358 "ARQ NVM operation 0x%04x completed\n",
9362 dev_info(&pf->pdev->dev,
9363 "ARQ: Unknown event 0x%04x ignored\n",
9367 } while (i++ < pf->adminq_work_limit);
9369 if (i < pf->adminq_work_limit)
9370 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
9372 /* re-enable Admin queue interrupt cause */
9373 val = rd32(hw, I40E_PFINT_ICR0_ENA);
9374 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
9375 wr32(hw, I40E_PFINT_ICR0_ENA, val);
9378 kfree(event.msg_buf);
9382 * i40e_verify_eeprom - make sure eeprom is good to use
9383 * @pf: board private structure
9385 static void i40e_verify_eeprom(struct i40e_pf *pf)
9389 err = i40e_diag_eeprom_test(&pf->hw);
9390 if (err) {
9391 /* retry in case of garbage read */
9392 err = i40e_diag_eeprom_test(&pf->hw);
9393 if (err) {
9394 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
9395 err);
9396 set_bit(__I40E_BAD_EEPROM, pf->state);
9397 }
9398 }
9400 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
9401 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
9402 clear_bit(__I40E_BAD_EEPROM, pf->state);
9407 * i40e_enable_pf_switch_lb
9408 * @pf: pointer to the PF structure
9410 * enable switch loop back or die - no point in a return value
9412 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
9414 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9415 struct i40e_vsi_context ctxt;
9416 int ret;
9418 ctxt.seid = pf->main_vsi_seid;
9419 ctxt.pf_num = pf->hw.pf_id;
9421 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9422 if (ret) {
9423 dev_info(&pf->pdev->dev,
9424 "couldn't get PF vsi config, err %s aq_err %s\n",
9425 i40e_stat_str(&pf->hw, ret),
9426 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9427 return;
9428 }
9429 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9430 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9431 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9433 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
9435 dev_info(&pf->pdev->dev,
9436 "update vsi switch failed, err %s aq_err %s\n",
9437 i40e_stat_str(&pf->hw, ret),
9438 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9443 * i40e_disable_pf_switch_lb
9444 * @pf: pointer to the PF structure
9446 * disable switch loop back or die - no point in a return value
9448 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
9450 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9451 struct i40e_vsi_context ctxt;
9452 int ret;
9454 ctxt.seid = pf->main_vsi_seid;
9455 ctxt.pf_num = pf->hw.pf_id;
9457 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9458 if (ret) {
9459 dev_info(&pf->pdev->dev,
9460 "couldn't get PF vsi config, err %s aq_err %s\n",
9461 i40e_stat_str(&pf->hw, ret),
9462 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9463 return;
9464 }
9465 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9466 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9467 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9469 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
9471 dev_info(&pf->pdev->dev,
9472 "update vsi switch failed, err %s aq_err %s\n",
9473 i40e_stat_str(&pf->hw, ret),
9474 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9479 * i40e_config_bridge_mode - Configure the HW bridge mode
9480 * @veb: pointer to the bridge instance
9482 * Configure the loop back mode for the LAN VSI that is downlink to the
9483 * specified HW bridge instance. It is expected this function is called
9484 * when a new HW bridge is instantiated.
9486 static void i40e_config_bridge_mode(struct i40e_veb *veb)
9488 struct i40e_pf *pf = veb->pf;
9490 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
9491 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
9492 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
9493 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
9494 i40e_disable_pf_switch_lb(pf);
9496 i40e_enable_pf_switch_lb(pf);
9500 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
9501 * @veb: pointer to the VEB instance
9503 * This is a recursive function that first builds the attached VSIs then
9504 * recurses in to build the next layer of VEB. We track the connections
9505 * through our own index numbers because the seid's from the HW could
9506 * change across the reset.
9508 static int i40e_reconstitute_veb(struct i40e_veb *veb)
9510 struct i40e_vsi *ctl_vsi = NULL;
9511 struct i40e_pf *pf = veb->pf;
9512 int v, veb_idx;
9513 int ret;
9515 /* build VSI that owns this VEB, temporarily attached to base VEB */
9516 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
9517 if (pf->vsi[v] &&
9518 pf->vsi[v]->veb_idx == veb->idx &&
9519 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
9520 ctl_vsi = pf->vsi[v];
9524 if (!ctl_vsi) {
9525 dev_info(&pf->pdev->dev,
9526 "missing owner VSI for veb_idx %d\n", veb->idx);
9527 ret = -ENOENT;
9528 goto end_reconstitute;
9530 if (ctl_vsi != pf->vsi[pf->lan_vsi])
9531 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
9532 ret = i40e_add_vsi(ctl_vsi);
9533 if (ret) {
9534 dev_info(&pf->pdev->dev,
9535 "rebuild of veb_idx %d owner VSI failed: %d\n",
9536 veb->idx, ret);
9537 goto end_reconstitute;
9539 i40e_vsi_reset_stats(ctl_vsi);
9541 /* create the VEB in the switch and move the VSI onto the VEB */
9542 ret = i40e_add_veb(veb, ctl_vsi);
9543 if (ret)
9544 goto end_reconstitute;
9546 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
9547 veb->bridge_mode = BRIDGE_MODE_VEB;
9549 veb->bridge_mode = BRIDGE_MODE_VEPA;
9550 i40e_config_bridge_mode(veb);
9552 /* create the remaining VSIs attached to this VEB */
9553 for (v = 0; v < pf->num_alloc_vsi; v++) {
9554 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
9555 continue;
9557 if (pf->vsi[v]->veb_idx == veb->idx) {
9558 struct i40e_vsi *vsi = pf->vsi[v];
9560 vsi->uplink_seid = veb->seid;
9561 ret = i40e_add_vsi(vsi);
9562 if (ret) {
9563 dev_info(&pf->pdev->dev,
9564 "rebuild of vsi_idx %d failed: %d\n",
9565 v, ret);
9566 goto end_reconstitute;
9568 i40e_vsi_reset_stats(vsi);
9572 /* create any VEBs attached to this VEB - RECURSION */
9573 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
9574 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
9575 pf->veb[veb_idx]->uplink_seid = veb->seid;
9576 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
9577 if (ret)
9578 break;
9582 end_reconstitute:
9583 return ret;
9587 * i40e_get_capabilities - get info about the HW
9588 * @pf: the PF struct
9589 * @list_type: AQ capability to be queried
9590 static int i40e_get_capabilities(struct i40e_pf *pf,
9591 enum i40e_admin_queue_opc list_type)
9593 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
9594 u16 data_size;
9595 int buf_len, err;
9598 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
9599 do {
9600 cap_buf = kzalloc(buf_len, GFP_KERNEL);
9601 if (!cap_buf)
9602 return -ENOMEM;
9604 /* this loads the data into the hw struct for us */
9605 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
9606 &data_size, list_type,
9607 NULL);
9608 /* data loaded, buffer no longer needed */
9609 kfree(cap_buf);
9611 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
9612 /* retry with a larger buffer */
9613 buf_len = data_size;
9614 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
9615 dev_info(&pf->pdev->dev,
9616 "capability discovery failed, err %s aq_err %s\n",
9617 i40e_stat_str(&pf->hw, err),
9618 i40e_aq_str(&pf->hw,
9619 pf->hw.aq.asq_last_status));
9620 return -ENODEV;
9621 }
9622 } while (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM);
9624 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
9625 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9626 dev_info(&pf->pdev->dev,
9627 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
9628 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
9629 pf->hw.func_caps.num_msix_vectors,
9630 pf->hw.func_caps.num_msix_vectors_vf,
9631 pf->hw.func_caps.fd_filters_guaranteed,
9632 pf->hw.func_caps.fd_filters_best_effort,
9633 pf->hw.func_caps.num_tx_qp,
9634 pf->hw.func_caps.num_vsis);
9635 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
9636 dev_info(&pf->pdev->dev,
9637 "switch_mode=0x%04x, function_valid=0x%08x\n",
9638 pf->hw.dev_caps.switch_mode,
9639 pf->hw.dev_caps.valid_functions);
9640 dev_info(&pf->pdev->dev,
9641 "SR-IOV=%d, num_vfs for all function=%u\n",
9642 pf->hw.dev_caps.sr_iov_1_1,
9643 pf->hw.dev_caps.num_vfs);
9644 dev_info(&pf->pdev->dev,
9645 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
9646 pf->hw.dev_caps.num_vsis,
9647 pf->hw.dev_caps.num_rx_qp,
9648 pf->hw.dev_caps.num_tx_qp);
9651 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9652 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
9653 + pf->hw.func_caps.num_vfs)
9654 if (pf->hw.revision_id == 0 &&
9655 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
9656 dev_info(&pf->pdev->dev,
9657 "got num_vsis %d, setting num_vsis to %d\n",
9658 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
9659 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
9665 static int i40e_vsi_clear(struct i40e_vsi *vsi);
9668 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
9669 * @pf: board private structure
9671 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
9673 struct i40e_vsi *vsi;
9675 /* quick workaround for an NVM issue that leaves a critical register
9676 * uninitialized
9677 */
9678 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
9679 static const u32 hkey[] = {
9680 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
9681 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
9682 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
9683 0x95b3a76d};
9684 int i;
9686 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
9687 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
9690 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
9691 return;
9693 /* find existing VSI and see if it needs configuring */
9694 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9696 /* create a new VSI if none exists */
9697 if (!vsi) {
9698 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
9699 pf->vsi[pf->lan_vsi]->seid, 0);
9700 if (!vsi) {
9701 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
9702 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9703 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
9704 return;
9705 }
9706 }
9708 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
9712 * i40e_fdir_teardown - release the Flow Director resources
9713 * @pf: board private structure
9715 static void i40e_fdir_teardown(struct i40e_pf *pf)
9717 struct i40e_vsi *vsi;
9719 i40e_fdir_filter_exit(pf);
9720 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9721 if (vsi)
9722 i40e_vsi_release(vsi);
9726 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
9727 * @vsi: PF main vsi
9728 * @seid: seid of main or channel VSIs
9730 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
9731 * existed before reset
9733 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
9735 struct i40e_cloud_filter *cfilter;
9736 struct i40e_pf *pf = vsi->back;
9737 struct hlist_node *node;
9740 /* Add cloud filters back if they exist */
9741 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
9742 cloud_node) {
9743 if (cfilter->seid != seid)
9744 continue;
9746 if (cfilter->dst_port)
9747 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
9748 true);
9749 else
9750 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
9752 if (ret) {
9753 dev_dbg(&pf->pdev->dev,
9754 "Failed to rebuild cloud filter, err %s aq_err %s\n",
9755 i40e_stat_str(&pf->hw, ret),
9756 i40e_aq_str(&pf->hw,
9757 pf->hw.aq.asq_last_status));
9758 return ret;
9761 return 0;
9765 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
9766 * @vsi: PF main vsi
9768 * Rebuilds channel VSIs if they existed before reset
9770 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
9772 struct i40e_channel *ch, *ch_tmp;
9775 if (list_empty(&vsi->ch_list))
9776 return 0;
9778 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
9779 if (!ch->initialized)
9780 continue;
9781 /* Proceed with creation of channel (VMDq2) VSI */
9782 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
9783 if (ret) {
9784 dev_info(&vsi->back->pdev->dev,
9785 "failed to rebuild channels using uplink_seid %u\n",
9786 vsi->uplink_seid);
9787 return ret;
9788 }
9789 /* Reconfigure TX queues using QTX_CTL register */
9790 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
9791 if (ret) {
9792 dev_info(&vsi->back->pdev->dev,
9793 "failed to configure TX rings for channel %u\n",
9794 ch->seid);
9795 return ret;
9796 }
9797 /* update 'next_base_queue' */
9798 vsi->next_base_queue = vsi->next_base_queue +
9799 ch->num_queue_pairs;
9800 if (ch->max_tx_rate) {
9801 u64 credits = ch->max_tx_rate;
9803 if (i40e_set_bw_limit(vsi, ch->seid,
9804 ch->max_tx_rate))
9805 return -EINVAL;
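/* Credits are expressed in I40E_BW_CREDIT_DIVISOR (50 Mbps) units,
 * so e.g. a 500 Mbps cap becomes 10 credits after the do_div() below.
 */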
9807 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9808 dev_dbg(&vsi->back->pdev->dev,
9809 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9814 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
9815 if (ret) {
9816 dev_dbg(&vsi->back->pdev->dev,
9817 "Failed to rebuild cloud filters for channel VSI %u\n",
9818 ch->seid);
9819 return ret;
9822 return 0;
9826 * i40e_prep_for_reset - prep for the core to reset
9827 * @pf: board private structure
9828 * @lock_acquired: indicates whether or not the lock has been acquired
9829 * before this function was called.
9831 * Close up the VFs and other things in prep for PF Reset.
9833 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
9835 struct i40e_hw *hw = &pf->hw;
9836 i40e_status ret = 0;
9837 u32 v;
9839 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
9840 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
9841 return;
9842 if (i40e_check_asq_alive(&pf->hw))
9843 i40e_vc_notify_reset(pf);
9845 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
9847 /* quiesce the VSIs and their queues that are not already DOWN */
9848 /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
9851 i40e_pf_quiesce_all_vsi(pf);
9855 for (v = 0; v < pf->num_alloc_vsi; v++) {
9856 if (pf->vsi[v])
9857 pf->vsi[v]->seid = 0;
9860 i40e_shutdown_adminq(&pf->hw);
9862 /* call shutdown HMC */
9863 if (hw->hmc.hmc_obj) {
9864 ret = i40e_shutdown_lan_hmc(hw);
9865 if (ret)
9866 dev_warn(&pf->pdev->dev,
9867 "shutdown_lan_hmc failed: %d\n", ret);
9870 /* Save the current PTP time so that we can restore the time after the
9871 * reset completes.
9872 */
9873 i40e_ptp_save_hw_time(pf);
9877 * i40e_send_version - update firmware with driver version
9878 * @pf: PF struct
9880 static void i40e_send_version(struct i40e_pf *pf)
9882 struct i40e_driver_version dv;
9884 dv.major_version = 0xff;
9885 dv.minor_version = 0xff;
9886 dv.build_version = 0xff;
9887 dv.subbuild_version = 0;
9888 strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
9889 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
9893 * i40e_get_oem_version - get OEM specific version information
9894 * @hw: pointer to the hardware structure
9896 static void i40e_get_oem_version(struct i40e_hw *hw)
9898 u16 block_offset = 0xffff;
9899 u16 block_length = 0;
9900 u16 capabilities = 0;
9901 u16 gen_snap = 0;
9902 u16 release = 0;
9904 #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
9905 #define I40E_NVM_OEM_LENGTH_OFFSET 0x00
9906 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
9907 #define I40E_NVM_OEM_GEN_OFFSET 0x02
9908 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03
9909 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
9910 #define I40E_NVM_OEM_LENGTH 3
9912 /* Check if pointer to OEM version block is valid. */
9913 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
9914 if (block_offset == 0xffff)
9915 return;
9917 /* Check if OEM version block has correct length. */
9918 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
9919 &block_length);
9920 if (block_length < I40E_NVM_OEM_LENGTH)
9921 return;
9923 /* Check if OEM version format is as expected. */
9924 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
9925 &capabilities);
9926 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
9927 return;
9929 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
9930 &gen_snap);
9931 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
9932 &release);
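/* The OEM version packs the generation/snapshot word into the high
 * bits of nvm.oem_ver and the release word into the low bits, as
 * assembled below.
 */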
9933 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
9934 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
9938 * i40e_reset - wait for core reset to finish, reset PF if CoreR not seen
9939 * @pf: board private structure
9941 static int i40e_reset(struct i40e_pf *pf)
9943 struct i40e_hw *hw = &pf->hw;
9946 ret = i40e_pf_reset(hw);
9947 if (ret) {
9948 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
9949 set_bit(__I40E_RESET_FAILED, pf->state);
9950 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9951 } else {
9952 pf->pfr_count++;
9953 }
9954 return ret;
9958 * i40e_rebuild - rebuild using a saved config
9959 * @pf: board private structure
9960 * @reinit: if the Main VSI needs to re-initialized.
9961 * @lock_acquired: indicates whether or not the lock has been acquired
9962 * before this function was called.
9964 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
9966 int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
9967 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9968 struct i40e_hw *hw = &pf->hw;
9969 u8 set_fc_aq_fail = 0;
9970 i40e_status ret;
9971 u32 val;
9972 int v;
9974 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
9975 i40e_check_recovery_mode(pf)) {
9976 i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
9979 if (test_bit(__I40E_DOWN, pf->state) &&
9980 !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
9981 !old_recovery_mode_bit)
9982 goto clear_recovery;
9983 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
9985 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
9986 ret = i40e_init_adminq(&pf->hw);
9988 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
9989 i40e_stat_str(&pf->hw, ret),
9990 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9991 goto clear_recovery;
9993 i40e_get_oem_version(&pf->hw);
9995 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
9996 ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
9997 hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
9998 /* The following delay is necessary for 4.33 firmware and older
9999 * to recover after EMP reset. 200 ms should suffice but we
10000 * put here 300 ms to be sure that FW is ready to operate
10001 * after reset.
10002 */
10003 mdelay(300);
10004 }
10006 /* re-verify the eeprom if we just had an EMP reset */
10007 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
10008 i40e_verify_eeprom(pf);
10010 /* if we are going out of or into recovery mode we have to act
10011 * accordingly with regard to resources initialization
10012 * and deinitialization
10014 if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
10015 old_recovery_mode_bit) {
10016 if (i40e_get_capabilities(pf,
10017 i40e_aqc_opc_list_func_capabilities))
10018 goto end_unlock;
10020 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10021 /* we're staying in recovery mode so we'll reinitialize
10022 * misc vector here
10023 */
10024 if (i40e_setup_misc_vector_for_recovery_mode(pf))
10025 goto end_unlock;
10026 } else {
10027 if (!lock_acquired)
10028 rtnl_lock();
10029 /* we're going out of recovery mode so we'll free
10030 * the IRQ allocated specifically for recovery mode
10031 * and restore the interrupt scheme
10033 free_irq(pf->pdev->irq, pf);
10034 i40e_clear_interrupt_scheme(pf);
10035 if (i40e_restore_interrupt_scheme(pf))
10036 goto end_unlock;
10037 }
10039 /* tell the firmware that we're starting */
10040 i40e_send_version(pf);
10042 /* bail out in case recovery mode was detected, as there is
10043 * no need for further configuration.
10044 */
10045 goto end_unlock;
10046 }
10048 i40e_clear_pxe_mode(hw);
10049 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
10050 if (ret)
10051 goto end_core_reset;
10053 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10054 hw->func_caps.num_rx_qp, 0, 0);
10055 if (ret) {
10056 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
10057 goto end_core_reset;
10059 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10060 if (ret) {
10061 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
10062 goto end_core_reset;
10065 /* Enable FW to write a default DCB config on link-up */
10066 i40e_aq_set_dcb_parameters(hw, true, NULL);
10068 #ifdef CONFIG_I40E_DCB
10069 ret = i40e_init_pf_dcb(pf);
10070 if (ret) {
10071 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
10072 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10073 /* Continue without DCB enabled */
10075 #endif /* CONFIG_I40E_DCB */
10076 /* do basic switch setup */
10077 if (!lock_acquired)
10078 rtnl_lock();
10079 ret = i40e_setup_pf_switch(pf, reinit);
10080 if (ret)
10081 goto end_unlock;
10083 /* The driver only wants link up/down and module qualification
10084 * reports from firmware. Note the negative logic.
10086 ret = i40e_aq_set_phy_int_mask(&pf->hw,
10087 ~(I40E_AQ_EVENT_LINK_UPDOWN |
10088 I40E_AQ_EVENT_MEDIA_NA |
10089 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
10090 if (ret)
10091 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10092 i40e_stat_str(&pf->hw, ret),
10093 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10095 /* make sure our flow control settings are restored */
10096 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
10097 if (ret)
10098 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
10099 i40e_stat_str(&pf->hw, ret),
10100 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10102 /* Rebuild the VSIs and VEBs that existed before reset.
10103 * They are still in our local switch element arrays, so only
10104 * need to rebuild the switch model in the HW.
10106 * If there were VEBs but the reconstitution failed, we'll try
10107 * to recover minimal use by getting the basic PF VSI working.
10109 if (vsi->uplink_seid != pf->mac_seid) {
10110 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
10111 /* find the one VEB connected to the MAC, and find orphans */
10112 for (v = 0; v < I40E_MAX_VEB; v++) {
10113 if (!pf->veb[v])
10114 continue;
10116 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
10117 pf->veb[v]->uplink_seid == 0) {
10118 ret = i40e_reconstitute_veb(pf->veb[v]);
10119 if (!ret)
10120 continue;
10123 /* If Main VEB failed, we're in deep doodoo,
10124 * so give up rebuilding the switch and set up
10125 * for minimal rebuild of PF VSI.
10126 * If orphan failed, we'll report the error
10127 * but try to keep going.
10129 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
10130 dev_info(&pf->pdev->dev,
10131 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
10133 vsi->uplink_seid = pf->mac_seid;
10134 break;
10135 } else if (pf->veb[v]->uplink_seid == 0) {
10136 dev_info(&pf->pdev->dev,
10137 "rebuild of orphan VEB failed: %d\n",
10144 if (vsi->uplink_seid == pf->mac_seid) {
10145 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
10146 /* no VEB, so rebuild only the Main VSI */
10147 ret = i40e_add_vsi(vsi);
10148 if (ret) {
10149 dev_info(&pf->pdev->dev,
10150 "rebuild of Main VSI failed: %d\n", ret);
10151 goto end_unlock;
10152 }
10155 if (vsi->mqprio_qopt.max_rate[0]) {
10156 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
10157 u64 credits = 0;
10159 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
10160 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
10161 if (ret)
10162 goto end_unlock;
10164 credits = max_tx_rate;
10165 do_div(credits, I40E_BW_CREDIT_DIVISOR);
10166 dev_dbg(&vsi->back->pdev->dev,
10167 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10173 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
10174 if (ret)
10175 goto end_unlock;
10177 /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs
10178 * for this main VSI if they exist
10180 ret = i40e_rebuild_channels(vsi);
10181 if (ret)
10182 goto end_unlock;
10184 /* Reconfigure hardware for allowing smaller MSS in the case
10185 * of TSO, so that we avoid the MDD being fired and causing
10186 * a reset in the case of small MSS+TSO.
10188 #define I40E_REG_MSS 0x000E64DC
10189 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
10190 #define I40E_64BYTE_MSS 0x400000
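/* The MSS_MIN field occupies bits 25:16 of I40E_REG_MSS (see the
 * mask above), so 0x400000 is 0x40 == 64 bytes once shifted into
 * place.
 */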
10191 val = rd32(hw, I40E_REG_MSS);
10192 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
10193 val &= ~I40E_REG_MSS_MIN_MASK;
10194 val |= I40E_64BYTE_MSS;
10195 wr32(hw, I40E_REG_MSS, val);
10198 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
10199 msleep(75);
10200 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10201 if (ret)
10202 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10203 i40e_stat_str(&pf->hw, ret),
10204 i40e_aq_str(&pf->hw,
10205 pf->hw.aq.asq_last_status));
10207 /* reinit the misc interrupt */
10208 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10209 ret = i40e_setup_misc_vector(pf);
10211 /* Add a filter to drop all Flow control frames from any VSI from being
10212 * transmitted. By doing so we stop a malicious VF from sending out
10213 * PAUSE or PFC frames and potentially controlling traffic for other
10214 * PF/VF VSIs.
10215 * The FW can still send Flow control frames if enabled.
10217 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
10218 pf->main_vsi_seid);
10220 /* restart the VSIs that were rebuilt and running before the reset */
10221 i40e_pf_unquiesce_all_vsi(pf);
10223 /* Release the RTNL lock before we start resetting VFs */
10224 if (!lock_acquired)
10225 rtnl_unlock();
10227 /* Restore promiscuous settings */
10228 ret = i40e_set_promiscuous(pf, pf->cur_promisc);
10229 if (ret)
10230 dev_warn(&pf->pdev->dev,
10231 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
10232 pf->cur_promisc ? "on" : "off",
10233 i40e_stat_str(&pf->hw, ret),
10234 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10236 i40e_reset_all_vfs(pf, true);
10238 /* tell the firmware that we're starting */
10239 i40e_send_version(pf);
10241 /* We've already released the lock, so don't do it again */
10242 goto end_core_reset;
10244 end_unlock:
10245 if (!lock_acquired)
10246 rtnl_unlock();
10247 end_core_reset:
10248 clear_bit(__I40E_RESET_FAILED, pf->state);
10249 clear_recovery:
10250 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
10251 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
10255 * i40e_reset_and_rebuild - reset and rebuild using a saved config
10256 * @pf: board private structure
10257 * @reinit: if the Main VSI needs to re-initialized.
10258 * @lock_acquired: indicates whether or not the lock has been acquired
10259 * before this function was called.
10261 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
10262 bool lock_acquired)
10265 /* Now we wait for GRST to settle out.
10266 * We don't have to delete the VEBs or VSIs from the hw switch
10267 * because the reset will make them disappear.
10269 ret = i40e_reset(pf);
10270 if (!ret)
10271 i40e_rebuild(pf, reinit, lock_acquired);
10275 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
10276 * @pf: board private structure
10278 * @lock_acquired: indicates whether or not the lock has been acquired
10279 * before this function was called.
10280 *
10281 * Close up the VFs and other things in prep for a Core Reset,
10282 * then get ready to rebuild the world.
10283 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
10285 i40e_prep_for_reset(pf, lock_acquired);
10286 i40e_reset_and_rebuild(pf, false, lock_acquired);
10290 * i40e_handle_mdd_event
10291 * @pf: pointer to the PF structure
10293 * Called from the MDD irq handler to identify possibly malicious vfs
10295 static void i40e_handle_mdd_event(struct i40e_pf *pf)
10297 struct i40e_hw *hw = &pf->hw;
10298 bool mdd_detected = false;
10299 struct i40e_vf *vf;
10300 u32 reg;
10301 int i;
10303 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
10304 return;
10306 /* find what triggered the MDD event */
10307 reg = rd32(hw, I40E_GL_MDET_TX);
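/* GL_MDET_TX latches which event fired, the offending queue and the
 * PF/VF that owns it; the queue index is absolute, so the function's
 * base_queue is subtracted before it is reported below.
 */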
10308 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
10309 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
10310 I40E_GL_MDET_TX_PF_NUM_SHIFT;
10311 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
10312 I40E_GL_MDET_TX_VF_NUM_SHIFT;
10313 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
10314 I40E_GL_MDET_TX_EVENT_SHIFT;
10315 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
10316 I40E_GL_MDET_TX_QUEUE_SHIFT) -
10317 pf->hw.func_caps.base_queue;
10318 if (netif_msg_tx_err(pf))
10319 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
10320 event, queue, pf_num, vf_num);
10321 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
10322 mdd_detected = true;
10324 reg = rd32(hw, I40E_GL_MDET_RX);
10325 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
10326 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
10327 I40E_GL_MDET_RX_FUNCTION_SHIFT;
10328 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
10329 I40E_GL_MDET_RX_EVENT_SHIFT;
10330 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
10331 I40E_GL_MDET_RX_QUEUE_SHIFT) -
10332 pf->hw.func_caps.base_queue;
10333 if (netif_msg_rx_err(pf))
10334 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
10335 event, queue, func);
10336 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
10337 mdd_detected = true;
10340 if (mdd_detected) {
10341 reg = rd32(hw, I40E_PF_MDET_TX);
10342 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
10343 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
10344 dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
10346 reg = rd32(hw, I40E_PF_MDET_RX);
10347 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
10348 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
10349 dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
10353 /* see if one of the VFs needs its hand slapped */
10354 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
10355 vf = &pf->vf[i];
10356 reg = rd32(hw, I40E_VP_MDET_TX(i));
10357 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
10358 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
10359 vf->num_mdd_events++;
10360 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
10361 i);
10362 dev_info(&pf->pdev->dev,
10363 "Use PF Control I/F to re-enable the VF\n");
10364 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10367 reg = rd32(hw, I40E_VP_MDET_RX(i));
10368 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
10369 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
10370 vf->num_mdd_events++;
10371 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
10372 i);
10373 dev_info(&pf->pdev->dev,
10374 "Use PF Control I/F to re-enable the VF\n");
10375 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10379 /* re-enable mdd interrupt cause */
10380 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
10381 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
10382 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
10383 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
10387 static const char *i40e_tunnel_name(u8 type)
10389 switch (type) {
10390 case UDP_TUNNEL_TYPE_VXLAN:
10391 return "vxlan";
10392 case UDP_TUNNEL_TYPE_GENEVE:
10393 return "geneve";
10394 default:
10395 return "unknown";
10396 }
10400 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
10401 * @pf: board private structure
10403 static void i40e_sync_udp_filters(struct i40e_pf *pf)
10407 /* loop through and set pending bit for all active UDP filters */
10408 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
10409 if (pf->udp_ports[i].port)
10410 pf->pending_udp_bitmap |= BIT_ULL(i);
10413 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
10417 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
10418 * @pf: board private structure
10420 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
10422 struct i40e_hw *hw = &pf->hw;
10423 u8 filter_index, type;
10424 u16 port;
10425 int i;
10427 if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
10428 return;
10430 /* acquire RTNL to maintain state of flags and port requests */
10431 rtnl_lock();
10433 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
10434 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
10435 struct i40e_udp_port_config *udp_port;
10436 i40e_status ret = 0;
10438 udp_port = &pf->udp_ports[i];
10439 pf->pending_udp_bitmap &= ~BIT_ULL(i);
10441 port = READ_ONCE(udp_port->port);
10442 type = READ_ONCE(udp_port->type);
10443 filter_index = READ_ONCE(udp_port->filter_index);
10445 /* release RTNL while we wait on AQ command */
10446 rtnl_unlock();
10448 if (port)
10449 ret = i40e_aq_add_udp_tunnel(hw, port,
10450 type,
10451 &filter_index,
10452 NULL);
10453 else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
10454 ret = i40e_aq_del_udp_tunnel(hw, filter_index,
10455 NULL);
10457 /* reacquire RTNL so we can update filter_index */
10458 rtnl_lock();
10460 if (ret) {
10461 dev_info(&pf->pdev->dev,
10462 "%s %s port %d, index %d failed, err %s aq_err %s\n",
10463 i40e_tunnel_name(type),
10464 port ? "add" : "delete",
10465 port,
10466 filter_index,
10467 i40e_stat_str(&pf->hw, ret),
10468 i40e_aq_str(&pf->hw,
10469 pf->hw.aq.asq_last_status));
10470 if (port) {
10471 /* failed to add, just reset port,
10472 * drop pending bit for any deletion
10474 udp_port->port = 0;
10475 pf->pending_udp_bitmap &= ~BIT_ULL(i);
10477 } else if (port) {
10478 /* record filter index on success */
10479 udp_port->filter_index = filter_index;
10484 rtnl_unlock();
10488 * i40e_service_task - Run the driver's async subtasks
10489 * @work: pointer to work_struct containing our data
10491 static void i40e_service_task(struct work_struct *work)
10493 struct i40e_pf *pf = container_of(work,
10494 struct i40e_pf,
10495 service_task);
10496 unsigned long start_time = jiffies;
10498 /* don't bother with service tasks if a reset is in progress */
10499 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
10500 test_bit(__I40E_SUSPENDED, pf->state))
10501 return;
10503 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
10504 return;
10506 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10507 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
10508 i40e_sync_filters_subtask(pf);
10509 i40e_reset_subtask(pf);
10510 i40e_handle_mdd_event(pf);
10511 i40e_vc_process_vflr_event(pf);
10512 i40e_watchdog_subtask(pf);
10513 i40e_fdir_reinit_subtask(pf);
10514 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
10515 /* Client subtask will reopen next time through. */
10516 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
10517 true);
10518 } else {
10519 i40e_client_subtask(pf);
10520 if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
10521 pf->state))
10522 i40e_notify_client_of_l2_param_changes(
10523 pf->vsi[pf->lan_vsi]);
10524 }
10525 i40e_sync_filters_subtask(pf);
10526 i40e_sync_udp_filters_subtask(pf);
10527 } else {
10528 i40e_reset_subtask(pf);
10529 }
10531 i40e_clean_adminq_subtask(pf);
10533 /* flush memory to make sure state is correct before next watchdog */
10534 smp_mb__before_atomic();
10535 clear_bit(__I40E_SERVICE_SCHED, pf->state);
10537 /* If the tasks have taken longer than one timer cycle or there
10538 * is more work to be done, reschedule the service task now
10539 * rather than wait for the timer to tick again.
10541 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
10542 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
10543 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
10544 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
10545 i40e_service_event_schedule(pf);
10549 * i40e_service_timer - timer callback
10550 * @t: pointer to the timer_list containing our PF struct
10552 static void i40e_service_timer(struct timer_list *t)
10554 struct i40e_pf *pf = from_timer(pf, t, service_timer);
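/* The timer only re-arms itself and schedules the service task; the
 * actual work happens in i40e_service_task. round_jiffies() aligns
 * the expiry to a whole second so wakeups can be batched.
 */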
10556 mod_timer(&pf->service_timer,
10557 round_jiffies(jiffies + pf->service_timer_period));
10558 i40e_service_event_schedule(pf);
10562 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
10563 * @vsi: the VSI being configured
10565 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
10567 struct i40e_pf *pf = vsi->back;
10569 switch (vsi->type) {
10570 case I40E_VSI_MAIN:
10571 vsi->alloc_queue_pairs = pf->num_lan_qps;
10572 if (!vsi->num_tx_desc)
10573 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10574 I40E_REQ_DESCRIPTOR_MULTIPLE);
10575 if (!vsi->num_rx_desc)
10576 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10577 I40E_REQ_DESCRIPTOR_MULTIPLE);
10578 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10579 vsi->num_q_vectors = pf->num_lan_msix;
10581 vsi->num_q_vectors = 1;
10585 case I40E_VSI_FDIR:
10586 vsi->alloc_queue_pairs = 1;
10587 vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
10588 I40E_REQ_DESCRIPTOR_MULTIPLE);
10589 vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
10590 I40E_REQ_DESCRIPTOR_MULTIPLE);
10591 vsi->num_q_vectors = pf->num_fdsb_msix;
10594 case I40E_VSI_VMDQ2:
10595 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
10596 if (!vsi->num_tx_desc)
10597 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10598 I40E_REQ_DESCRIPTOR_MULTIPLE);
10599 if (!vsi->num_rx_desc)
10600 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10601 I40E_REQ_DESCRIPTOR_MULTIPLE);
10602 vsi->num_q_vectors = pf->num_vmdq_msix;
10605 case I40E_VSI_SRIOV:
10606 vsi->alloc_queue_pairs = pf->num_vf_qps;
10607 if (!vsi->num_tx_desc)
10608 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10609 I40E_REQ_DESCRIPTOR_MULTIPLE);
10610 if (!vsi->num_rx_desc)
10611 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10612 I40E_REQ_DESCRIPTOR_MULTIPLE);
10624 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
10625 * @vsi: VSI pointer
10626 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
10628 * On error: returns error code (negative)
10629 * On success: returns 0
10631 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
10633 struct i40e_ring **next_rings;
10634 int size;
10635 int ret = 0;
10637 /* allocate memory for both Tx, XDP Tx and Rx ring pointers */
10638 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
10639 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
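/* A single allocation backs all of the ring pointers: the Tx block
 * first, then the optional XDP Tx block, then Rx; next_rings tracks
 * the carve-up below.
 */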
10640 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
10641 if (!vsi->tx_rings)
10642 return -ENOMEM;
10643 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
10644 if (i40e_enabled_xdp_vsi(vsi)) {
10645 vsi->xdp_rings = next_rings;
10646 next_rings += vsi->alloc_queue_pairs;
10648 vsi->rx_rings = next_rings;
10650 if (alloc_qvectors) {
10651 /* allocate memory for q_vector pointers */
10652 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
10653 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
10654 if (!vsi->q_vectors) {
10655 ret = -ENOMEM;
10656 goto err_vectors;
10657 }
10659 return 0;
10661 err_vectors:
10662 kfree(vsi->tx_rings);
10663 return ret;
10667 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
10668 * @pf: board private structure
10669 * @type: type of VSI
10671 * On error: returns error code (negative)
10672 * On success: returns vsi index in PF (positive)
10674 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
10675 {
10676 int ret = -ENODEV;
10677 struct i40e_vsi *vsi;
10678 int vsi_idx;
10679 int i;
10681 /* Need to protect the allocation of the VSIs at the PF level */
10682 mutex_lock(&pf->switch_mutex);
10684 /* VSI list may be fragmented if VSI creation/destruction has
10685 * been happening. We can afford to do a quick scan to look
10686 * for any free VSIs in the list.
10688 * find next empty vsi slot, looping back around if necessary
10689 */
10690 i = pf->next_vsi;
10691 while (i < pf->num_alloc_vsi && pf->vsi[i])
10692 i++;
10693 if (i >= pf->num_alloc_vsi) {
10694 i = 0;
10695 while (i < pf->next_vsi && pf->vsi[i])
10696 i++;
10697 }
10699 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
10700 vsi_idx = i; /* Found one! */
10701 } else {
10702 ret = -ENODEV;
10703 goto unlock_pf; /* out of VSI slots! */
10704 }
10705 pf->next_vsi = ++i;
10707 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
10708 if (!vsi) {
10709 ret = -ENOMEM;
10710 goto unlock_pf;
10711 }
10712 vsi->type = type;
10713 vsi->back = pf;
10714 set_bit(__I40E_VSI_DOWN, vsi->state);
10716 vsi->idx = vsi_idx;
10717 vsi->int_rate_limit = 0;
10718 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
10719 pf->rss_table_size : 64;
10720 vsi->netdev_registered = false;
10721 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
10722 hash_init(vsi->mac_filter_hash);
10723 vsi->irqs_ready = false;
10725 if (type == I40E_VSI_MAIN) {
10726 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
10727 if (!vsi->af_xdp_zc_qps)
10728 goto err_rings;
10729 }
10731 ret = i40e_set_num_rings_in_vsi(vsi);
10732 if (ret)
10733 goto err_rings;
10735 ret = i40e_vsi_alloc_arrays(vsi, true);
10736 if (ret)
10737 goto err_rings;
10739 /* Setup default MSIX irq handler for VSI */
10740 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
10742 /* Initialize VSI lock */
10743 spin_lock_init(&vsi->mac_filter_hash_lock);
10744 pf->vsi[vsi_idx] = vsi;
10745 ret = vsi_idx;
10746 goto unlock_pf;
10748 err_rings:
10749 bitmap_free(vsi->af_xdp_zc_qps);
10750 pf->next_vsi = i - 1;
10751 kfree(vsi);
10752 unlock_pf:
10753 mutex_unlock(&pf->switch_mutex);
10754 return ret;
10755 }
10758 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
10759 * @vsi: VSI pointer
10760 * @free_qvectors: a bool to specify if q_vectors need to be freed.
10762 **/
10765 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
10767 /* free the ring and vector containers */
10768 if (free_qvectors) {
10769 kfree(vsi->q_vectors);
10770 vsi->q_vectors = NULL;
10772 kfree(vsi->tx_rings);
10773 vsi->tx_rings = NULL;
10774 vsi->rx_rings = NULL;
10775 vsi->xdp_rings = NULL;
10779 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
10781 * @vsi: Pointer to VSI structure
10783 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
10788 kfree(vsi->rss_hkey_user);
10789 vsi->rss_hkey_user = NULL;
10791 kfree(vsi->rss_lut_user);
10792 vsi->rss_lut_user = NULL;
10796 * i40e_vsi_clear - Deallocate the VSI provided
10797 * @vsi: the VSI being un-configured
10799 static int i40e_vsi_clear(struct i40e_vsi *vsi)
10801 struct i40e_pf *pf;
10803 if (!vsi)
10804 return 0;
10806 if (!vsi->back)
10807 goto free_vsi;
10808 pf = vsi->back;
10810 mutex_lock(&pf->switch_mutex);
10811 if (!pf->vsi[vsi->idx]) {
10812 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
10813 vsi->idx, vsi->idx, vsi->type);
10814 goto unlock_vsi;
10815 }
10817 if (pf->vsi[vsi->idx] != vsi) {
10818 dev_err(&pf->pdev->dev,
10819 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
10820 pf->vsi[vsi->idx]->idx,
10821 pf->vsi[vsi->idx]->type,
10822 vsi->idx, vsi->type);
10823 goto unlock_vsi;
10824 }
10826 /* updates the PF for this cleared vsi */
10827 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10828 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
10830 bitmap_free(vsi->af_xdp_zc_qps);
10831 i40e_vsi_free_arrays(vsi, true);
10832 i40e_clear_rss_config_user(vsi);
10834 pf->vsi[vsi->idx] = NULL;
10835 if (vsi->idx < pf->next_vsi)
10836 pf->next_vsi = vsi->idx;
10838 unlock_vsi:
10839 mutex_unlock(&pf->switch_mutex);
10840 free_vsi:
10841 kfree(vsi);
10843 return 0;
10844 }
10847 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
10848 * @vsi: the VSI being cleaned
10850 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
10854 if (vsi->tx_rings && vsi->tx_rings[0]) {
10855 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10856 kfree_rcu(vsi->tx_rings[i], rcu);
10857 WRITE_ONCE(vsi->tx_rings[i], NULL);
10858 WRITE_ONCE(vsi->rx_rings[i], NULL);
10859 if (vsi->xdp_rings)
10860 WRITE_ONCE(vsi->xdp_rings[i], NULL);
10866 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
10867 * @vsi: the VSI being configured
10869 static int i40e_alloc_rings(struct i40e_vsi *vsi)
10871 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
10872 struct i40e_pf *pf = vsi->back;
10873 struct i40e_ring *ring;
10875 /* Set basic values in the rings to be used later during open() */
10876 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10877 /* allocate space for both Tx and Rx in one shot */
10878 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
10879 if (!ring)
10880 goto err_out;
10882 ring->queue_index = i;
10883 ring->reg_idx = vsi->base_queue + i;
10884 ring->ring_active = false;
10886 ring->netdev = vsi->netdev;
10887 ring->dev = &pf->pdev->dev;
10888 ring->count = vsi->num_tx_desc;
10891 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10892 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10893 ring->itr_setting = pf->tx_itr_default;
10894 WRITE_ONCE(vsi->tx_rings[i], ring++);
10896 if (!i40e_enabled_xdp_vsi(vsi))
10897 goto setup_rx;
10899 ring->queue_index = vsi->alloc_queue_pairs + i;
10900 ring->reg_idx = vsi->base_queue + ring->queue_index;
10901 ring->ring_active = false;
10903 ring->netdev = NULL;
10904 ring->dev = &pf->pdev->dev;
10905 ring->count = vsi->num_tx_desc;
10908 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10909 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10910 set_ring_xdp(ring);
10911 ring->itr_setting = pf->tx_itr_default;
10912 WRITE_ONCE(vsi->xdp_rings[i], ring++);
10914 setup_rx:
10915 ring->queue_index = i;
10916 ring->reg_idx = vsi->base_queue + i;
10917 ring->ring_active = false;
10919 ring->netdev = vsi->netdev;
10920 ring->dev = &pf->pdev->dev;
10921 ring->count = vsi->num_rx_desc;
10924 ring->itr_setting = pf->rx_itr_default;
10925 WRITE_ONCE(vsi->rx_rings[i], ring);
10926 }
10928 return 0;
10930 err_out:
10931 i40e_vsi_clear_rings(vsi);
10932 return -ENOMEM;
10933 }
10936 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
10937 * @pf: board private structure
10938 * @vectors: the number of MSI-X vectors to request
10940 * Returns the number of vectors reserved, or error
10942 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
10944 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
10945 I40E_MIN_MSIX, vectors);
10946 if (vectors < 0) {
10947 dev_info(&pf->pdev->dev,
10948 "MSI-X vector reservation failed: %d\n", vectors);
10949 vectors = 0;
10950 }
10952 return vectors;
10953 }
10956 * i40e_init_msix - Setup the MSIX capability
10957 * @pf: board private structure
10959 * Work with the OS to set up the MSIX vectors needed.
10961 * Returns the number of vectors reserved or negative on failure
10963 static int i40e_init_msix(struct i40e_pf *pf)
10965 struct i40e_hw *hw = &pf->hw;
10966 int cpus, extra_vectors;
10967 int vectors_left;
10968 int v_budget, i;
10969 int v_actual;
10970 int iwarp_requested = 0;
10972 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
10973 return -ENODEV;
10975 /* The number of vectors we'll request will be comprised of:
10976 * - Add 1 for "other" cause for Admin Queue events, etc.
10977 * - The number of LAN queue pairs
10978 * - Queues being used for RSS.
10979 * We don't need as many as max_rss_size vectors.
10980 * use rss_size instead in the calculation since that
10981 * is governed by number of cpus in the system.
10982 * - assumes symmetric Tx/Rx pairing
10983 * - The number of VMDq pairs
10984 * - The CPU count within the NUMA node if iWARP is enabled
10985 * Once we count this up, try the request.
10987 * If we can't get what we want, we'll simplify to nearly nothing
10988 * and try again. If that still fails, we punt.
10990 vectors_left = hw->func_caps.num_msix_vectors;
10991 v_budget = 0;
10993 /* reserve one vector for miscellaneous handler */
10994 if (vectors_left) {
10995 v_budget++;
10996 vectors_left--;
10997 }
10999 /* reserve some vectors for the main PF traffic queues. Initially we
11000 * only reserve at most 50% of the available vectors, in the case that
11001 * the number of online CPUs is large. This ensures that we can enable
11002 * extra features as well. Once we've enabled the other features, we
11003 * will use any remaining vectors to reach as close as we can to the
11004 * number of online CPUs.
11006 cpus = num_online_cpus();
11007 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
11008 vectors_left -= pf->num_lan_msix;
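/* Worked example (illustrative numbers): with 64 vectors available and 16
 * online CPUs, one vector goes to the misc handler, num_lan_msix becomes
 * min(16, 63 / 2) = 16, and 47 vectors remain for FD sideband, iWARP and
 * VMDq before the leftover is handed back to the LAN queues below.
 */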
11010 /* reserve one vector for sideband flow director */
11011 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11012 if (vectors_left) {
11013 pf->num_fdsb_msix = 1;
11014 v_budget++;
11015 vectors_left--;
11016 } else {
11017 pf->num_fdsb_msix = 0;
11018 }
11019 }
11021 /* can we reserve enough for iWARP? */
11022 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11023 iwarp_requested = pf->num_iwarp_msix;
11025 if (!vectors_left)
11026 pf->num_iwarp_msix = 0;
11027 else if (vectors_left < pf->num_iwarp_msix)
11028 pf->num_iwarp_msix = 1;
11029 v_budget += pf->num_iwarp_msix;
11030 vectors_left -= pf->num_iwarp_msix;
11031 }
11033 /* any vectors left over go for VMDq support */
11034 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
11035 if (!vectors_left) {
11036 pf->num_vmdq_msix = 0;
11037 pf->num_vmdq_qps = 0;
11038 } else {
11039 int vmdq_vecs_wanted =
11040 pf->num_vmdq_vsis * pf->num_vmdq_qps;
11041 int vmdq_vecs =
11042 min_t(int, vectors_left, vmdq_vecs_wanted);
11044 /* if we're short on vectors for what's desired, we limit
11045 * the queues per vmdq. If this is still more than are
11046 * available, the user will need to change the number of
11047 * queues/vectors used by the PF later with the ethtool
11048 * channels command
11049 */
11050 if (vectors_left < vmdq_vecs_wanted) {
11051 pf->num_vmdq_qps = 1;
11052 vmdq_vecs_wanted = pf->num_vmdq_vsis;
11053 vmdq_vecs = min_t(int,
11054 vectors_left,
11055 vmdq_vecs_wanted);
11056 }
11057 pf->num_vmdq_msix = pf->num_vmdq_qps;
11059 v_budget += vmdq_vecs;
11060 vectors_left -= vmdq_vecs;
11061 }
11062 }
11064 /* On systems with a large number of SMP cores, we previously limited
11065 * the number of vectors for num_lan_msix to be at most 50% of the
11066 * available vectors, to allow for other features. Now, we add back
11067 * the remaining vectors. However, we ensure that the total
11068 * num_lan_msix will not exceed num_online_cpus(). To do this, we
11069 * calculate the number of vectors we can add without going over the
11070 * cap of CPUs. For systems with a small number of CPUs this will be
11071 * zero.
11072 */
11073 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
11074 pf->num_lan_msix += extra_vectors;
11075 vectors_left -= extra_vectors;
11077 WARN(vectors_left < 0,
11078 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
11080 v_budget += pf->num_lan_msix;
11081 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
11082 GFP_KERNEL);
11083 if (!pf->msix_entries)
11084 return -ENOMEM;
11086 for (i = 0; i < v_budget; i++)
11087 pf->msix_entries[i].entry = i;
11088 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
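/* pci_enable_msix_range() may legitimately grant anything between
 * I40E_MIN_MSIX and v_budget, so each branch below must cope with a partial
 * grant rather than a simple success/failure.
 */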
11090 if (v_actual < I40E_MIN_MSIX) {
11091 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
11092 kfree(pf->msix_entries);
11093 pf->msix_entries = NULL;
11094 pci_disable_msix(pf->pdev);
11095 return -ENODEV;
11097 } else if (v_actual == I40E_MIN_MSIX) {
11098 /* Adjust for minimal MSIX use */
11099 pf->num_vmdq_vsis = 0;
11100 pf->num_vmdq_qps = 0;
11101 pf->num_lan_qps = 1;
11102 pf->num_lan_msix = 1;
11104 } else if (v_actual != v_budget) {
11105 /* If we have limited resources, we will start with no vectors
11106 * for the special features and then allocate vectors to some
11107 * of these features based on the policy and at the end disable
11108 * the features that did not get any vectors.
11109 */
11110 int vec;
11112 dev_info(&pf->pdev->dev,
11113 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
11114 v_actual, v_budget);
11115 /* reserve the misc vector */
11116 vec = v_actual - 1;
11118 /* Scale vector usage down */
11119 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
11120 pf->num_vmdq_vsis = 1;
11121 pf->num_vmdq_qps = 1;
11123 /* partition out the remaining vectors */
11124 switch (vec) {
11125 case 2:
11126 pf->num_lan_msix = 1;
11127 break;
11128 case 3:
11129 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11130 pf->num_lan_msix = 1;
11131 pf->num_iwarp_msix = 1;
11132 } else {
11133 pf->num_lan_msix = 2;
11134 }
11135 break;
11136 default:
11137 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11138 pf->num_iwarp_msix = min_t(int, (vec / 3),
11139 iwarp_requested);
11140 pf->num_vmdq_vsis = min_t(int, (vec / 3),
11141 I40E_DEFAULT_NUM_VMDQ_VSI);
11142 } else {
11143 pf->num_vmdq_vsis = min_t(int, (vec / 2),
11144 I40E_DEFAULT_NUM_VMDQ_VSI);
11145 }
11146 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11147 pf->num_fdsb_msix = 1;
11148 vec--;
11149 }
11150 pf->num_lan_msix = min_t(int,
11151 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
11152 pf->num_lan_msix);
11153 pf->num_lan_qps = pf->num_lan_msix;
11154 break;
11155 }
11156 }
11158 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
11159 (pf->num_fdsb_msix == 0)) {
11160 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
11161 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11162 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11164 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
11165 (pf->num_vmdq_msix == 0)) {
11166 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
11167 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
11170 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
11171 (pf->num_iwarp_msix == 0)) {
11172 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
11173 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11175 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
11176 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
11178 pf->num_vmdq_msix * pf->num_vmdq_vsis,
11180 pf->num_iwarp_msix);
11186 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
11187 * @vsi: the VSI being configured
11188 * @v_idx: index of the vector in the vsi struct
11189 * @cpu: cpu to be used on affinity_mask
11191 * We allocate one q_vector. If allocation fails we return -ENOMEM.
11193 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
11195 struct i40e_q_vector *q_vector;
11197 /* allocate q_vector */
11198 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
11199 if (!q_vector)
11200 return -ENOMEM;
11202 q_vector->vsi = vsi;
11203 q_vector->v_idx = v_idx;
11204 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
11206 if (vsi->netdev)
11207 netif_napi_add(vsi->netdev, &q_vector->napi,
11208 i40e_napi_poll, NAPI_POLL_WEIGHT);
11210 /* tie q_vector and vsi together */
11211 vsi->q_vectors[v_idx] = q_vector;
11213 return 0;
11214 }
11217 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
11218 * @vsi: the VSI being configured
11220 * We allocate one q_vector per queue interrupt.  If allocation fails we
11221 * return -ENOMEM.
11223 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
11225 struct i40e_pf *pf = vsi->back;
11226 int err, v_idx, num_q_vectors, current_cpu;
11228 /* if not MSIX, give the one vector only to the LAN VSI */
11229 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11230 num_q_vectors = vsi->num_q_vectors;
11231 else if (vsi == pf->vsi[pf->lan_vsi])
11232 num_q_vectors = 1;
11233 else
11234 return -EINVAL;
11236 current_cpu = cpumask_first(cpu_online_mask);
11238 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
11239 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
11240 if (err)
11241 goto err_out;
11242 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
11243 if (unlikely(current_cpu >= nr_cpu_ids))
11244 current_cpu = cpumask_first(cpu_online_mask);
11245 }
11247 return 0;
11249 err_out:
11250 while (v_idx--)
11251 i40e_free_q_vector(vsi, v_idx);
11253 return err;
11254 }
11257 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
11258 * @pf: board private structure to initialize
11260 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
11261 {
11262 int vectors = 0;
11263 ssize_t size;
11265 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11266 vectors = i40e_init_msix(pf);
11267 if (vectors < 0) {
11268 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
11269 I40E_FLAG_IWARP_ENABLED |
11270 I40E_FLAG_RSS_ENABLED |
11271 I40E_FLAG_DCB_CAPABLE |
11272 I40E_FLAG_DCB_ENABLED |
11273 I40E_FLAG_SRIOV_ENABLED |
11274 I40E_FLAG_FD_SB_ENABLED |
11275 I40E_FLAG_FD_ATR_ENABLED |
11276 I40E_FLAG_VMDQ_ENABLED);
11277 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11279 /* rework the queue expectations without MSIX */
11280 i40e_determine_queue_usage(pf);
11281 }
11282 }
11284 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11285 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
11286 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
11287 vectors = pci_enable_msi(pf->pdev);
11288 if (vectors < 0) {
11289 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
11290 vectors);
11291 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
11292 }
11293 vectors = 1; /* one MSI or Legacy vector */
11294 }
11296 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
11297 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
11299 /* set up vector assignment tracking */
11300 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
11301 pf->irq_pile = kzalloc(size, GFP_KERNEL);
11302 if (!pf->irq_pile)
11303 return -ENOMEM;
11305 pf->irq_pile->num_entries = vectors;
11306 pf->irq_pile->search_hint = 0;
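/* The irq_pile is a small "lump" allocator: i40e_get_lump() claims a
 * contiguous run of vector indices and stamps each entry with the owner id.
 * Reserving one entry below pins vector 0 for the misc/other-cause handler.
 */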
11308 /* track first vector for misc interrupts, ignore return */
11309 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
11311 return 0;
11312 }
11315 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
11316 * @pf: private board data structure
11318 * Restore the interrupt scheme that was cleared when we suspended the
11319 * device. This should be called during resume to re-allocate the q_vectors
11320 * and reacquire IRQs.
11322 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
11323 {
11324 int err, i;
11326 /* We cleared the MSI and MSI-X flags when disabling the old interrupt
11327 * scheme. We need to re-enable them here in order to attempt to
11328 * re-acquire the MSI or MSI-X vectors
11329 */
11330 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
11332 err = i40e_init_interrupt_scheme(pf);
11333 if (err)
11334 return err;
11336 /* Now that we've re-acquired IRQs, we need to remap the vectors and
11337 * rings together again.
11339 for (i = 0; i < pf->num_alloc_vsi; i++) {
11340 if (pf->vsi[i]) {
11341 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
11342 if (err)
11343 goto err_unwind;
11344 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
11345 }
11346 }
11348 err = i40e_setup_misc_vector(pf);
11349 if (err)
11350 goto err_unwind;
11352 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
11353 i40e_client_update_msix_info(pf);
11355 return 0;
11357 err_unwind:
11358 while (i--) {
11359 if (pf->vsi[i])
11360 i40e_vsi_free_q_vectors(pf->vsi[i]);
11361 }
11363 return err;
11364 }
11367 * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
11368 * non queue events in recovery mode
11369 * @pf: board private structure
11371 * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
11372 * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
11373 * This is handled differently than in the normal (non-recovery) path since
11374 * no Tx/Rx resources are being allocated.
11375 **/
11376 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
11380 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11381 err = i40e_setup_misc_vector(pf);
11383 if (err) {
11384 dev_info(&pf->pdev->dev,
11385 "MSI-X misc vector request failed, error %d\n",
11386 err);
11387 return err;
11388 }
11389 } else {
11390 u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
11392 err = request_irq(pf->pdev->irq, i40e_intr, flags,
11393 pf->int_name, pf);
11395 if (err) {
11396 dev_info(&pf->pdev->dev,
11397 "MSI/legacy misc vector request failed, error %d\n",
11398 err);
11399 return err;
11400 }
11401 }
11402 i40e_enable_misc_int_causes(pf);
11403 i40e_irq_dynamic_enable_icr0(pf);
11405 return 0;
11406 }
11409 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
11410 * @pf: board private structure
11412 * This sets up the handler for MSIX 0, which is used to manage the
11413 * non-queue interrupts, e.g. AdminQ and errors. This is not used
11414 * when in MSI or Legacy interrupt mode.
11416 static int i40e_setup_misc_vector(struct i40e_pf *pf)
11418 struct i40e_hw *hw = &pf->hw;
11419 int err = 0;
11421 /* Only request the IRQ once, the first time through. */
11422 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
11423 err = request_irq(pf->msix_entries[0].vector,
11424 i40e_intr, 0, pf->int_name, pf);
11425 if (err) {
11426 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
11427 dev_info(&pf->pdev->dev,
11428 "request_irq for %s failed: %d\n",
11429 pf->int_name, err);
11430 return -EFAULT;
11431 }
11432 }
11434 i40e_enable_misc_int_causes(pf);
11436 /* associate no queues to the misc vector */
11437 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
11438 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
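/* The PFINT_ITR0 register counts in 2-usec units, hence the >> 1 when
 * programming the usec-based I40E_ITR_8K value as a rate limit for the
 * "other cause" vector.
 */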
11440 i40e_flush(hw);
11442 i40e_irq_dynamic_enable_icr0(pf);
11444 return err;
11445 }
11448 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
11449 * @vsi: Pointer to vsi structure
11450 * @seed: Buffer to store the hash keys
11451 * @lut: Buffer to store the lookup table entries
11452 * @lut_size: Size of buffer to store the lookup table entries
11454 * Return 0 on success, negative on failure
11456 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
11457 u8 *lut, u16 lut_size)
11459 struct i40e_pf *pf = vsi->back;
11460 struct i40e_hw *hw = &pf->hw;
11461 int ret = 0;
11463 if (seed) {
11464 ret = i40e_aq_get_rss_key(hw, vsi->id,
11465 (struct i40e_aqc_get_set_rss_key_data *)seed);
11466 if (ret) {
11467 dev_info(&pf->pdev->dev,
11468 "Cannot get RSS key, err %s aq_err %s\n",
11469 i40e_stat_str(&pf->hw, ret),
11470 i40e_aq_str(&pf->hw,
11471 pf->hw.aq.asq_last_status));
11472 return ret;
11473 }
11474 }
11476 if (lut) {
11477 bool pf_lut = vsi->type == I40E_VSI_MAIN;
11479 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
11480 if (ret) {
11481 dev_info(&pf->pdev->dev,
11482 "Cannot get RSS lut, err %s aq_err %s\n",
11483 i40e_stat_str(&pf->hw, ret),
11484 i40e_aq_str(&pf->hw,
11485 pf->hw.aq.asq_last_status));
11486 return ret;
11487 }
11488 }
11490 return ret;
11491 }
11494 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
11495 * @vsi: Pointer to vsi structure
11496 * @seed: RSS hash seed
11497 * @lut: Lookup table
11498 * @lut_size: Lookup table size
11500 * Returns 0 on success, negative on failure
11502 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
11503 const u8 *lut, u16 lut_size)
11505 struct i40e_pf *pf = vsi->back;
11506 struct i40e_hw *hw = &pf->hw;
11507 u16 vf_id = vsi->vf_id;
11508 u8 i;
11510 /* Fill out hash function seed */
11511 if (seed) {
11512 u32 *seed_dw = (u32 *)seed;
11514 if (vsi->type == I40E_VSI_MAIN) {
11515 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
11516 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
11517 } else if (vsi->type == I40E_VSI_SRIOV) {
11518 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
11519 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
11520 } else {
11521 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
11522 }
11523 }
11525 if (lut) {
11526 u32 *lut_dw = (u32 *)lut;
11528 if (vsi->type == I40E_VSI_MAIN) {
11529 if (lut_size != I40E_HLUT_ARRAY_SIZE)
11530 return -EINVAL;
11531 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11532 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
11533 } else if (vsi->type == I40E_VSI_SRIOV) {
11534 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
11535 return -EINVAL;
11536 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
11537 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
11538 } else {
11539 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
11540 }
11541 }
11542 i40e_flush(hw);
11544 return 0;
11545 }
11548 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
11549 * @vsi: Pointer to VSI structure
11550 * @seed: Buffer to store the keys
11551 * @lut: Buffer to store the lookup table entries
11552 * @lut_size: Size of buffer to store the lookup table entries
11554 * Returns 0 on success, negative on failure
11556 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
11557 u8 *lut, u16 lut_size)
11559 struct i40e_pf *pf = vsi->back;
11560 struct i40e_hw *hw = &pf->hw;
11561 u16 i;
11563 if (seed) {
11564 u32 *seed_dw = (u32 *)seed;
11566 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
11567 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
11568 }
11569 if (lut) {
11570 u32 *lut_dw = (u32 *)lut;
11572 if (lut_size != I40E_HLUT_ARRAY_SIZE)
11573 return -EINVAL;
11574 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11575 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
11576 }
11578 return 0;
11579 }
11582 * i40e_config_rss - Configure RSS keys and lut
11583 * @vsi: Pointer to VSI structure
11584 * @seed: RSS hash seed
11585 * @lut: Lookup table
11586 * @lut_size: Lookup table size
11588 * Returns 0 on success, negative on failure
11590 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
11592 struct i40e_pf *pf = vsi->back;
11594 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
11595 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
11596 else
11597 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
11598 }
11601 * i40e_get_rss - Get RSS keys and lut
11602 * @vsi: Pointer to VSI structure
11603 * @seed: Buffer to store the keys
11604 * @lut: Buffer to store the lookup table entries
11605 * @lut_size: Size of buffer to store the lookup table entries
11607 * Returns 0 on success, negative on failure
11609 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
11611 struct i40e_pf *pf = vsi->back;
11613 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
11614 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
11615 else
11616 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
11617 }
11620 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
11621 * @pf: Pointer to board private structure
11622 * @lut: Lookup table
11623 * @rss_table_size: Lookup table size
11624 * @rss_size: Range of queue number for hashing
11626 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
11627 u16 rss_table_size, u16 rss_size)
11628 {
11629 u16 i;
11631 for (i = 0; i < rss_table_size; i++)
11632 lut[i] = i % rss_size;
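/* Example: rss_table_size = 8 and rss_size = 3 yields the LUT
 * { 0, 1, 2, 0, 1, 2, 0, 1 }, spreading hash buckets round-robin across the
 * first three queues.
 */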
11636 * i40e_pf_config_rss - Prepare for RSS if used
11637 * @pf: board private structure
11639 static int i40e_pf_config_rss(struct i40e_pf *pf)
11641 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11642 u8 seed[I40E_HKEY_ARRAY_SIZE];
11643 u8 *lut;
11644 struct i40e_hw *hw = &pf->hw;
11645 u32 reg_val;
11646 u64 hena;
11647 int ret;
11649 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
11650 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
11651 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
11652 hena |= i40e_pf_get_default_rss_hena(pf);
11654 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
11655 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
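/* The 64-bit hash-enable (HENA) mask is split across two 32-bit registers:
 * PFQF_HENA(0) takes the low dword and PFQF_HENA(1) the high dword.
 */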
11657 /* Determine the RSS table size based on the hardware capabilities */
11658 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
11659 reg_val = (pf->rss_table_size == 512) ?
11660 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
11661 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
11662 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
11664 /* Determine the RSS size of the VSI */
11665 if (!vsi->rss_size) {
11666 u16 qcount;
11667 /* If the firmware does something weird during VSI init, we
11668 * could end up with zero TCs. Check for that to avoid
11669 * divide-by-zero. It probably won't pass traffic, but it also
11670 * won't be horribly broken.
11671 */
11672 qcount = vsi->num_queue_pairs /
11673 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
11674 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11675 }
11676 if (!vsi->rss_size)
11677 return -EINVAL;
11679 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
11680 if (!lut)
11681 return -ENOMEM;
11683 /* Use user configured lut if there is one, otherwise use default */
11684 if (vsi->rss_lut_user)
11685 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
11686 else
11687 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
11689 /* Use user configured hash key if there is one, otherwise
11690 * use default.
11691 */
11692 if (vsi->rss_hkey_user)
11693 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
11694 else
11695 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
11696 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
11697 kfree(lut);
11699 return ret;
11700 }
11703 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
11704 * @pf: board private structure
11705 * @queue_count: the requested queue count for rss.
11707 * returns 0 if rss is not enabled, if enabled returns the final rss queue
11708 * count which may be different from the requested queue count.
11709 * Note: expects to be called while under rtnl_lock()
11711 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
11713 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11714 int new_rss_size;
11716 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
11717 return 0;
11719 queue_count = min_t(int, queue_count, num_online_cpus());
11720 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
11722 if (queue_count != vsi->num_queue_pairs) {
11723 u16 qcount;
11725 vsi->req_queue_pairs = queue_count;
11726 i40e_prep_for_reset(pf, true);
11728 pf->alloc_rss_size = new_rss_size;
11730 i40e_reset_and_rebuild(pf, true, true);
11732 /* Discard the user configured hash keys and lut, if less
11733 * queues are enabled.
11735 if (queue_count < vsi->rss_size) {
11736 i40e_clear_rss_config_user(vsi);
11737 dev_dbg(&pf->pdev->dev,
11738 "discard user configured hash keys and lut\n");
11741 /* Reset vsi->rss_size, as number of enabled queues changed */
11742 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
11743 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11745 i40e_pf_config_rss(pf);
11746 }
11747 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
11748 vsi->req_queue_pairs, pf->rss_size_max);
11749 return pf->alloc_rss_size;
11750 }
11753 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
11754 * @pf: board private structure
11756 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
11758 i40e_status status;
11759 bool min_valid, max_valid;
11760 u32 max_bw, min_bw;
11762 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
11763 &min_valid, &max_valid);
11765 if (!status) {
11766 if (min_valid)
11767 pf->min_bw = min_bw;
11768 if (max_valid)
11769 pf->max_bw = max_bw;
11770 }
11772 return status;
11773 }
11776 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
11777 * @pf: board private structure
11779 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
11781 struct i40e_aqc_configure_partition_bw_data bw_data;
11782 i40e_status status;
11784 /* Set the valid bit for this PF */
11785 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
11786 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
11787 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
11789 /* Set the new bandwidths */
11790 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
11792 return status;
11793 }
11796 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
11797 * @pf: board private structure
11799 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
11801 /* Commit temporary BW setting to permanent NVM image */
11802 enum i40e_admin_queue_err last_aq_status;
11803 i40e_status ret;
11804 u16 nvm_word;
11806 if (pf->hw.partition_id != 1) {
11807 dev_info(&pf->pdev->dev,
11808 "Commit BW only works on partition 1! This is partition %d",
11809 pf->hw.partition_id);
11810 ret = I40E_NOT_SUPPORTED;
11811 goto bw_commit_out;
11814 /* Acquire NVM for read access */
11815 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
11816 last_aq_status = pf->hw.aq.asq_last_status;
11817 if (ret) {
11818 dev_info(&pf->pdev->dev,
11819 "Cannot acquire NVM for read access, err %s aq_err %s\n",
11820 i40e_stat_str(&pf->hw, ret),
11821 i40e_aq_str(&pf->hw, last_aq_status));
11822 goto bw_commit_out;
11823 }
11825 /* Read word 0x10 of NVM - SW compatibility word 1 */
11826 ret = i40e_aq_read_nvm(&pf->hw,
11827 I40E_SR_NVM_CONTROL_WORD,
11828 0x10, sizeof(nvm_word), &nvm_word,
11829 false, NULL);
11830 /* Save off last admin queue command status before releasing
11831 * the NVM
11832 */
11833 last_aq_status = pf->hw.aq.asq_last_status;
11834 i40e_release_nvm(&pf->hw);
11835 if (ret) {
11836 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
11837 i40e_stat_str(&pf->hw, ret),
11838 i40e_aq_str(&pf->hw, last_aq_status));
11839 goto bw_commit_out;
11840 }
11842 /* Wait a bit for NVM release to complete */
11843 msleep(50);
11845 /* Acquire NVM for write access */
11846 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
11847 last_aq_status = pf->hw.aq.asq_last_status;
11848 if (ret) {
11849 dev_info(&pf->pdev->dev,
11850 "Cannot acquire NVM for write access, err %s aq_err %s\n",
11851 i40e_stat_str(&pf->hw, ret),
11852 i40e_aq_str(&pf->hw, last_aq_status));
11853 goto bw_commit_out;
11854 }
11855 /* Write it back out unchanged to initiate update NVM,
11856 * which will force a write of the shadow (alt) RAM to
11857 * the NVM - thus storing the bandwidth values permanently.
11858 */
11859 ret = i40e_aq_update_nvm(&pf->hw,
11860 I40E_SR_NVM_CONTROL_WORD,
11861 0x10, sizeof(nvm_word),
11862 &nvm_word, true, 0, NULL);
11863 /* Save off last admin queue command status before releasing
11864 * the NVM
11865 */
11866 last_aq_status = pf->hw.aq.asq_last_status;
11867 i40e_release_nvm(&pf->hw);
11868 if (ret)
11869 dev_info(&pf->pdev->dev,
11870 "BW settings NOT SAVED, err %s aq_err %s\n",
11871 i40e_stat_str(&pf->hw, ret),
11872 i40e_aq_str(&pf->hw, last_aq_status));
11874 bw_commit_out:
11875 return ret;
11876 }
11879 * i40e_is_total_port_shutdown_enabled - read NVM and return value
11880 * if total port shutdown feature is enabled for this PF
11881 * @pf: board private structure
11883 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
11885 #define I40E_TOTAL_PORT_SHUTDOWN_ENABLED BIT(4)
11886 #define I40E_FEATURES_ENABLE_PTR 0x2A
11887 #define I40E_CURRENT_SETTING_PTR 0x2B
11888 #define I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D
11889 #define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
11890 #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
11891 #define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
11892 i40e_status read_status = I40E_SUCCESS;
11893 u16 sr_emp_sr_settings_ptr = 0;
11894 u16 features_enable = 0;
11895 u16 link_behavior = 0;
11896 bool ret = false;
11898 read_status = i40e_read_nvm_word(&pf->hw,
11899 I40E_SR_EMP_SR_SETTINGS_PTR,
11900 &sr_emp_sr_settings_ptr);
11901 if (read_status)
11902 goto err_nvm;
11903 read_status = i40e_read_nvm_word(&pf->hw,
11904 sr_emp_sr_settings_ptr +
11905 I40E_FEATURES_ENABLE_PTR,
11906 &features_enable);
11907 if (read_status)
11908 goto err_nvm;
11909 if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
11910 read_status = i40e_read_nvm_module_data(&pf->hw,
11911 I40E_SR_EMP_SR_SETTINGS_PTR,
11912 I40E_CURRENT_SETTING_PTR,
11913 I40E_LINK_BEHAVIOR_WORD_OFFSET,
11914 I40E_LINK_BEHAVIOR_WORD_LENGTH,
11915 &link_behavior);
11916 if (read_status)
11917 goto err_nvm;
11918 link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
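/* Example: each port owns a 4-bit field in the link-behavior word, so for
 * port 2 the word is shifted right by 8 bits before bit 0 (OS-forced link
 * behavior) is tested.
 */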
11919 ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
11920 }
11921 return ret;
11923 err_nvm:
11924 dev_warn(&pf->pdev->dev,
11925 "total-port-shutdown feature is off due to read nvm error: %s\n",
11926 i40e_stat_str(&pf->hw, read_status));
11927 return ret;
11928 }
11931 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
11932 * @pf: board private structure to initialize
11934 * i40e_sw_init initializes the Adapter private data structure.
11935 * Fields are initialized based on PCI device information and
11936 * OS network device settings (MTU size).
11938 static int i40e_sw_init(struct i40e_pf *pf)
11939 {
11940 int err = 0;
11941 int size;
11943 /* Set default capability flags */
11944 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
11945 I40E_FLAG_MSI_ENABLED |
11946 I40E_FLAG_MSIX_ENABLED;
11948 /* Set default ITR */
11949 pf->rx_itr_default = I40E_ITR_RX_DEF;
11950 pf->tx_itr_default = I40E_ITR_TX_DEF;
11952 /* Depending on PF configurations, it is possible that the RSS
11953 * maximum might end up larger than the available queues
11954 */
11955 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
11956 pf->alloc_rss_size = 1;
11957 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
11958 pf->rss_size_max = min_t(int, pf->rss_size_max,
11959 pf->hw.func_caps.num_tx_qp);
11960 if (pf->hw.func_caps.rss) {
11961 pf->flags |= I40E_FLAG_RSS_ENABLED;
11962 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
11963 num_online_cpus());
11966 /* MFP mode enabled */
11967 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
11968 pf->flags |= I40E_FLAG_MFP_ENABLED;
11969 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
11970 if (i40e_get_partition_bw_setting(pf)) {
11971 dev_warn(&pf->pdev->dev,
11972 "Could not get partition bw settings\n");
11974 dev_info(&pf->pdev->dev,
11975 "Partition BW Min = %8.8x, Max = %8.8x\n",
11976 pf->min_bw, pf->max_bw);
11978 /* nudge the Tx scheduler */
11979 i40e_set_partition_bw_setting(pf);
11980 }
11981 }
11983 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
11984 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
11985 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
11986 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
11987 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
11988 pf->hw.num_partitions > 1)
11989 dev_info(&pf->pdev->dev,
11990 "Flow Director Sideband mode Disabled in MFP mode\n");
11991 else
11992 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11993 pf->fdir_pf_filter_count =
11994 pf->hw.func_caps.fd_filters_guaranteed;
11995 pf->hw.fdir_shared_filter_count =
11996 pf->hw.func_caps.fd_filters_best_effort;
11999 if (pf->hw.mac.type == I40E_MAC_X722) {
12000 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
12001 I40E_HW_128_QP_RSS_CAPABLE |
12002 I40E_HW_ATR_EVICT_CAPABLE |
12003 I40E_HW_WB_ON_ITR_CAPABLE |
12004 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
12005 I40E_HW_NO_PCI_LINK_CHECK |
12006 I40E_HW_USE_SET_LLDP_MIB |
12007 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
12008 I40E_HW_PTP_L4_CAPABLE |
12009 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
12010 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
12012 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
12013 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
12014 I40E_FDEVICT_PCTYPE_DEFAULT) {
12015 dev_warn(&pf->pdev->dev,
12016 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
12017 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
12018 }
12019 } else if ((pf->hw.aq.api_maj_ver > 1) ||
12020 ((pf->hw.aq.api_maj_ver == 1) &&
12021 (pf->hw.aq.api_min_ver > 4))) {
12022 /* Supported in FW API version higher than 1.4 */
12023 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
12026 /* Enable HW ATR eviction if possible */
12027 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
12028 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
12030 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12031 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
12032 (pf->hw.aq.fw_maj_ver < 4))) {
12033 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
12034 /* No DCB support for FW < v4.33 */
12035 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
12038 /* Disable FW LLDP if FW < v4.3 */
12039 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12040 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
12041 (pf->hw.aq.fw_maj_ver < 4)))
12042 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
12044 /* Use the FW Set LLDP MIB API if FW > v4.40 */
12045 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12046 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
12047 (pf->hw.aq.fw_maj_ver >= 5)))
12048 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
12050 /* Enable PTP L4 if FW > v6.0 */
12051 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12052 pf->hw.aq.fw_maj_ver >= 6)
12053 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
12055 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
12056 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
12057 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
12058 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
12061 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
12062 pf->flags |= I40E_FLAG_IWARP_ENABLED;
12063 /* IWARP needs one extra vector for CQP just like MISC.*/
12064 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
12065 }
12066 /* Stopping FW LLDP engine is supported on XL710 and X722
12067 * starting from FW versions determined in i40e_init_adminq.
12068 * Stopping the FW LLDP engine is not supported on XL710
12069 * if NPAR is functioning so unset this hw flag in this case.
12070 */
12071 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12072 pf->hw.func_caps.npar_enable &&
12073 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
12074 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
12076 #ifdef CONFIG_PCI_IOV
12077 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
12078 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
12079 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
12080 pf->num_req_vfs = min_t(int,
12081 pf->hw.func_caps.num_vfs,
12082 I40E_MAX_VF_COUNT);
12083 }
12084 #endif /* CONFIG_PCI_IOV */
12085 pf->eeprom_version = 0xDEAD;
12086 pf->lan_veb = I40E_NO_VEB;
12087 pf->lan_vsi = I40E_NO_VSI;
12089 /* By default FW has this off for performance reasons */
12090 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
12092 /* set up queue assignment tracking */
12093 size = sizeof(struct i40e_lump_tracking)
12094 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
12095 pf->qp_pile = kzalloc(size, GFP_KERNEL);
12096 if (!pf->qp_pile) {
12097 err = -ENOMEM;
12098 goto sw_init_done;
12099 }
12100 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
12101 pf->qp_pile->search_hint = 0;
12103 pf->tx_timeout_recovery_level = 1;
12105 if (pf->hw.mac.type != I40E_MAC_X722 &&
12106 i40e_is_total_port_shutdown_enabled(pf)) {
12107 /* Link down on close must be on when total port shutdown
12108 * is enabled for a given port
12110 pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
12111 I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
12112 dev_info(&pf->pdev->dev,
12113 "total-port-shutdown was enabled, link-down-on-close is forced on\n");
12115 mutex_init(&pf->switch_mutex);
12122 * i40e_set_ntuple - set the ntuple feature flag and take action
12123 * @pf: board private structure to initialize
12124 * @features: the feature set that the stack is suggesting
12126 * returns a bool to indicate if reset needs to happen
12128 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
12130 bool need_reset = false;
12132 /* Check if Flow Director n-tuple support was enabled or disabled. If
12133 * the state changed, we need to reset.
12135 if (features & NETIF_F_NTUPLE) {
12136 /* Enable filters and mark for reset */
12137 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
12138 need_reset = true;
12139 /* enable FD_SB only if there is MSI-X vector and no cloud
12140 * filters exist
12141 */
12142 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
12143 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12144 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
12145 }
12146 } else {
12147 /* turn off filters, mark for reset and clear SW filter list */
12148 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
12149 need_reset = true;
12150 i40e_fdir_filter_exit(pf);
12151 }
12152 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
12153 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
12154 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
12156 /* reset fd counters */
12157 pf->fd_add_err = 0;
12158 pf->fd_atr_cnt = 0;
12159 /* if ATR was auto disabled it can be re-enabled. */
12160 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
12161 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
12162 (I40E_DEBUG_FD & pf->hw.debug_mask))
12163 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
12164 }
12166 return need_reset;
12167 }
12169 * i40e_clear_rss_lut - clear the rx hash lookup table
12170 * @vsi: the VSI being configured
12172 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
12174 struct i40e_pf *pf = vsi->back;
12175 struct i40e_hw *hw = &pf->hw;
12176 u16 vf_id = vsi->vf_id;
12177 u8 i;
12179 if (vsi->type == I40E_VSI_MAIN) {
12180 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12181 wr32(hw, I40E_PFQF_HLUT(i), 0);
12182 } else if (vsi->type == I40E_VSI_SRIOV) {
12183 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12184 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
12185 } else {
12186 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12187 }
12188 }
12191 * i40e_set_features - set the netdev feature flags
12192 * @netdev: ptr to the netdev being adjusted
12193 * @features: the feature set that the stack is suggesting
12194 * Note: expects to be called while under rtnl_lock()
12196 static int i40e_set_features(struct net_device *netdev,
12197 netdev_features_t features)
12199 struct i40e_netdev_priv *np = netdev_priv(netdev);
12200 struct i40e_vsi *vsi = np->vsi;
12201 struct i40e_pf *pf = vsi->back;
12202 bool need_reset;
12204 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
12205 i40e_pf_config_rss(pf);
12206 else if (!(features & NETIF_F_RXHASH) &&
12207 netdev->features & NETIF_F_RXHASH)
12208 i40e_clear_rss_lut(vsi);
12210 if (features & NETIF_F_HW_VLAN_CTAG_RX)
12211 i40e_vlan_stripping_enable(vsi);
12212 else
12213 i40e_vlan_stripping_disable(vsi);
12215 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
12216 dev_err(&pf->pdev->dev,
12217 "Offloaded tc filters active, can't turn hw_tc_offload off");
12221 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
12222 i40e_del_all_macvlans(vsi);
12224 need_reset = i40e_set_ntuple(pf, features);
12226 if (need_reset)
12227 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12229 return 0;
12230 }
12233 * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
12234 * @pf: board private structure
12235 * @port: The UDP port to look up
12237 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
12239 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
12240 {
12241 u8 i;
12243 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
12244 /* Do not report ports with pending deletions as
12245 * being available.
12246 */
12247 if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
12248 continue;
12249 if (pf->udp_ports[i].port == port)
12250 return i;
12251 }
12253 return i;
12254 }
12257 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
12258 * @netdev: This physical port's netdev
12259 * @ti: Tunnel endpoint information
12261 static void i40e_udp_tunnel_add(struct net_device *netdev,
12262 struct udp_tunnel_info *ti)
12264 struct i40e_netdev_priv *np = netdev_priv(netdev);
12265 struct i40e_vsi *vsi = np->vsi;
12266 struct i40e_pf *pf = vsi->back;
12267 u16 port = ntohs(ti->port);
12268 u8 next_idx;
12269 u8 idx;
12271 idx = i40e_get_udp_port_idx(pf, port);
12273 /* Check if port already exists */
12274 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
12275 netdev_info(netdev, "port %d already offloaded\n", port);
12279 /* Now check if there is space to add the new port */
12280 next_idx = i40e_get_udp_port_idx(pf, 0);
12282 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
12283 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
12284 port);
12285 return;
12286 }
12288 switch (ti->type) {
12289 case UDP_TUNNEL_TYPE_VXLAN:
12290 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
12291 break;
12292 case UDP_TUNNEL_TYPE_GENEVE:
12293 if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
12294 return;
12295 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
12296 break;
12297 default:
12298 return;
12299 }
12301 /* New port: add it and mark its index in the bitmap */
12302 pf->udp_ports[next_idx].port = port;
12303 pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
12304 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
12305 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
12306 }
12309 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
12310 * @netdev: This physical port's netdev
12311 * @ti: Tunnel endpoint information
12313 static void i40e_udp_tunnel_del(struct net_device *netdev,
12314 struct udp_tunnel_info *ti)
12316 struct i40e_netdev_priv *np = netdev_priv(netdev);
12317 struct i40e_vsi *vsi = np->vsi;
12318 struct i40e_pf *pf = vsi->back;
12319 u16 port = ntohs(ti->port);
12320 u8 idx;
12322 idx = i40e_get_udp_port_idx(pf, port);
12324 /* Check if port already exists */
12325 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
12326 goto not_found;
12328 switch (ti->type) {
12329 case UDP_TUNNEL_TYPE_VXLAN:
12330 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
12331 goto not_found;
12332 break;
12333 case UDP_TUNNEL_TYPE_GENEVE:
12334 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
12335 goto not_found;
12336 break;
12337 default:
12338 goto not_found;
12339 }
12341 /* if port exists, set it to 0 (mark for deletion)
12342 * and make it pending
12343 */
12344 pf->udp_ports[idx].port = 0;
12346 /* Toggle pending bit instead of setting it. This way if we are
12347 * deleting a port that has yet to be added we just clear the pending
12348 * bit and don't have to worry about it.
12350 pf->pending_udp_bitmap ^= BIT_ULL(idx);
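/* XOR truth table for the pending bit: add-then-delete before a sync clears
 * it (1 ^ 1 = 0), while deleting an already-synced port sets it (0 ^ 1 = 1),
 * so the sync task sees exactly the work still outstanding.
 */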
12351 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
12353 return;
12354 not_found:
12355 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
12356 port);
12357 }
12359 static int i40e_get_phys_port_id(struct net_device *netdev,
12360 struct netdev_phys_item_id *ppid)
12362 struct i40e_netdev_priv *np = netdev_priv(netdev);
12363 struct i40e_pf *pf = np->vsi->back;
12364 struct i40e_hw *hw = &pf->hw;
12366 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
12367 return -EOPNOTSUPP;
12369 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
12370 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
12372 return 0;
12373 }
12376 * i40e_ndo_fdb_add - add an entry to the hardware database
12377 * @ndm: the input from the stack
12378 * @tb: pointer to array of nladdr (unused)
12379 * @dev: the net device pointer
12380 * @addr: the MAC address entry being added
12381 * @vid: VLAN ID
12382 * @flags: instructions from stack about fdb operation
12383 **/
12384 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
12385 struct net_device *dev,
12386 const unsigned char *addr, u16 vid,
12387 u16 flags,
12388 struct netlink_ext_ack *extack)
12390 struct i40e_netdev_priv *np = netdev_priv(dev);
12391 struct i40e_pf *pf = np->vsi->back;
12392 int err = 0;
12394 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
12395 return -EOPNOTSUPP;
12397 if (vid) {
12398 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
12399 return -EINVAL;
12400 }
12402 /* Hardware does not support aging addresses so if a
12403 * ndm_state is given only allow permanent addresses
12404 */
12405 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
12406 netdev_info(dev, "FDB only supports static addresses\n");
12410 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
12411 err = dev_uc_add_excl(dev, addr);
12412 else if (is_multicast_ether_addr(addr))
12413 err = dev_mc_add_excl(dev, addr);
12414 else
12415 err = -EINVAL;
12417 /* Only return duplicate errors if NLM_F_EXCL is set */
12418 if (err == -EEXIST && !(flags & NLM_F_EXCL))
12419 err = 0;
12421 return err;
12422 }
12425 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
12426 * @dev: the netdev being configured
12427 * @nlh: RTNL message
12428 * @flags: bridge flags
12429 * @extack: netlink extended ack
12431 * Inserts a new hardware bridge if not already created and
12432 * enables the bridging mode requested (VEB or VEPA). If the
12433 * hardware bridge has already been inserted and the request
12434 * is to change the mode then that requires a PF reset to
12435 * allow rebuild of the components with required hardware
12436 * bridge mode enabled.
12438 * Note: expects to be called while under rtnl_lock()
12440 static int i40e_ndo_bridge_setlink(struct net_device *dev,
12441 struct nlmsghdr *nlh,
12442 u16 flags,
12443 struct netlink_ext_ack *extack)
12445 struct i40e_netdev_priv *np = netdev_priv(dev);
12446 struct i40e_vsi *vsi = np->vsi;
12447 struct i40e_pf *pf = vsi->back;
12448 struct i40e_veb *veb = NULL;
12449 struct nlattr *attr, *br_spec;
12450 int i, rem;
12452 /* Only for PF VSI for now */
12453 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12454 return -EOPNOTSUPP;
12456 /* Find the HW bridge for PF VSI */
12457 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12458 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12459 veb = pf->veb[i];
12460 }
12462 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12464 nla_for_each_nested(attr, br_spec, rem) {
12465 __u16 mode;
12467 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12468 continue;
12470 mode = nla_get_u16(attr);
12471 if ((mode != BRIDGE_MODE_VEPA) &&
12472 (mode != BRIDGE_MODE_VEB))
12473 return -EINVAL;
12475 /* Insert a new HW bridge */
12476 if (!veb) {
12477 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12478 vsi->tc_config.enabled_tc);
12479 if (veb) {
12480 veb->bridge_mode = mode;
12481 i40e_config_bridge_mode(veb);
12482 } else {
12483 /* No Bridge HW offload available */
12484 return -ENOENT;
12485 }
12486 break;
12487 } else if (mode != veb->bridge_mode) {
12488 /* Existing HW bridge but different mode needs reset */
12489 veb->bridge_mode = mode;
12490 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
12491 if (mode == BRIDGE_MODE_VEB)
12492 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
12493 else
12494 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12495 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12496 break;
12497 }
12498 }
12500 return 0;
12501 }
12504 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
12505 * @skb: skb buff
12506 * @pid: process id
12507 * @seq: RTNL message seq #
12508 * @dev: the netdev being configured
12509 * @filter_mask: unused
12510 * @nlflags: netlink flags passed in
12512 * Return the mode in which the hardware bridge is operating in
12513 * VEB or VEPA.
12514 **/
12515 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12516 struct net_device *dev,
12517 u32 __always_unused filter_mask,
12518 int nlflags)
12519 {
12520 struct i40e_netdev_priv *np = netdev_priv(dev);
12521 struct i40e_vsi *vsi = np->vsi;
12522 struct i40e_pf *pf = vsi->back;
12523 struct i40e_veb *veb = NULL;
12524 int i;
12526 /* Only for PF VSI for now */
12527 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12528 return -EOPNOTSUPP;
12530 /* Find the HW bridge for the PF VSI */
12531 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12532 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12533 veb = pf->veb[i];
12534 }
12536 if (!veb)
12537 return 0;
12539 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
12540 0, 0, nlflags, filter_mask, NULL);
12541 }
12544 * i40e_features_check - Validate encapsulated packet conforms to limits
12545 * @skb: send buffer
12546 * @dev: This physical port's netdev
12547 * @features: Offload features that the stack believes apply
12549 static netdev_features_t i40e_features_check(struct sk_buff *skb,
12550 struct net_device *dev,
12551 netdev_features_t features)
12552 {
12553 size_t len;
12555 /* No point in doing any of this if neither checksum nor GSO are
12556 * being requested for this frame. We can rule out both by just
12557 * checking for CHECKSUM_PARTIAL
12558 */
12559 if (skb->ip_summed != CHECKSUM_PARTIAL)
12560 return features;
12562 /* We cannot support GSO if the MSS is going to be less than
12563 * 64 bytes.  If it is then we need to drop support for GSO.
12564 */
12565 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
12566 features &= ~NETIF_F_GSO_MASK;
12568 /* MACLEN can support at most 63 words */
12569 len = skb_network_header(skb) - skb->data;
12570 if (len & ~(63 * 2))
12571 goto out_err;
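/* The "len & ~(63 * 2)" idiom rejects any MAC header length that is odd or
 * larger than 126 bytes: the Tx descriptor carries MACLEN as a count of
 * 2-byte words in a field that holds at most 63.
 */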
12573 /* IPLEN and EIPLEN can support at most 127 dwords */
12574 len = skb_transport_header(skb) - skb_network_header(skb);
12575 if (len & ~(127 * 4))
12576 goto out_err;
12578 if (skb->encapsulation) {
12579 /* L4TUNLEN can support 127 words */
12580 len = skb_inner_network_header(skb) - skb_transport_header(skb);
12581 if (len & ~(127 * 2))
12582 goto out_err;
12584 /* IPLEN can support at most 127 dwords */
12585 len = skb_inner_transport_header(skb) -
12586 skb_inner_network_header(skb);
12587 if (len & ~(127 * 4))
12588 goto out_err;
12589 }
12591 /* No need to validate L4LEN as TCP is the only protocol with a
12592 * flexible value and we support all possible values supported
12593 * by TCP, which is at most 15 dwords
12594 */
12596 return features;
12597 out_err:
12598 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
12599 }
12602 * i40e_xdp_setup - add/remove an XDP program
12603 * @vsi: VSI to changed
12604 * @prog: XDP program
12606 static int i40e_xdp_setup(struct i40e_vsi *vsi,
12607 struct bpf_prog *prog)
12609 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
12610 struct i40e_pf *pf = vsi->back;
12611 struct bpf_prog *old_prog;
12612 bool need_reset;
12613 int i;
12615 /* Don't allow frames that span over multiple buffers */
12616 if (frame_size > vsi->rx_buf_len)
12617 return -EINVAL;
12619 if (!i40e_enabled_xdp_vsi(vsi) && !prog)
12620 return 0;
12622 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
12623 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
12625 if (need_reset)
12626 i40e_prep_for_reset(pf, true);
12628 old_prog = xchg(&vsi->xdp_prog, prog);
12630 if (need_reset) {
12631 if (!prog)
12632 /* Wait until ndo_xsk_wakeup completes. */
12633 synchronize_rcu();
12634 i40e_reset_and_rebuild(pf, true, true);
12635 }
12637 for (i = 0; i < vsi->num_queue_pairs; i++)
12638 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
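/* Each Rx ring caches its own xdp_prog pointer so the hot path does not
 * dereference the VSI; the WRITE_ONCE() pairs with a READ_ONCE() in the
 * ring's NAPI poll path.
 */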
12640 if (old_prog)
12641 bpf_prog_put(old_prog);
12643 /* Kick start the NAPI context if there is an AF_XDP socket open
12644 * on that queue id. This is so that receiving will start.
12645 */
12646 if (need_reset && prog)
12647 for (i = 0; i < vsi->num_queue_pairs; i++)
12648 if (vsi->xdp_rings[i]->xsk_pool)
12649 (void)i40e_xsk_wakeup(vsi->netdev, i,
12650 XDP_WAKEUP_RX);
12652 return 0;
12653 }
12656 * i40e_enter_busy_conf - Enters busy config state
12657 * @vsi: vsi
12659 * Returns 0 on success, <0 for failure.
12661 static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
12663 struct i40e_pf *pf = vsi->back;
12664 int timeout = 50;
12666 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
12667 timeout--;
12668 if (!timeout)
12669 return -EBUSY;
12670 usleep_range(1000, 2000);
12671 }
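/* With timeout = 50 and a 1-2 ms sleep per iteration, the wait above is
 * bounded to roughly 50-100 ms before giving up with -EBUSY.
 */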
12677 * i40e_exit_busy_conf - Exits busy config state
12678 * @vsi: vsi
12679 **/
12680 static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
12681 {
12682 struct i40e_pf *pf = vsi->back;
12684 clear_bit(__I40E_CONFIG_BUSY, pf->state);
12688 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
12689 * @vsi: vsi
12690 * @queue_pair: queue pair
12692 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
12694 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
12695 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
12696 memset(&vsi->tx_rings[queue_pair]->stats, 0,
12697 sizeof(vsi->tx_rings[queue_pair]->stats));
12698 if (i40e_enabled_xdp_vsi(vsi)) {
12699 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
12700 sizeof(vsi->xdp_rings[queue_pair]->stats));
12701 }
12702 }
12705 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
12706 * @vsi: vsi
12707 * @queue_pair: queue pair
12709 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
12711 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
12712 if (i40e_enabled_xdp_vsi(vsi)) {
12713 /* Make sure that in-progress ndo_xdp_xmit calls are
12714 * completed.
12715 */
12716 synchronize_rcu();
12717 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
12718 }
12719 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
12720 }
12723 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
12724 * @vsi: vsi
12725 * @queue_pair: queue pair
12726 * @enable: true for enable, false for disable
12728 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
12731 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12732 struct i40e_q_vector *q_vector = rxr->q_vector;
12734 if (!vsi->netdev)
12735 return;
12737 /* All rings in a qp belong to the same qvector. */
12738 if (q_vector->rx.ring || q_vector->tx.ring) {
12739 if (enable)
12740 napi_enable(&q_vector->napi);
12741 else
12742 napi_disable(&q_vector->napi);
12743 }
12744 }
12747 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
12748 * @vsi: vsi
12749 * @queue_pair: queue pair
12750 * @enable: true for enable, false for disable
12752 * Returns 0 on success, <0 on failure.
12754 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
12757 struct i40e_pf *pf = vsi->back;
12758 int pf_q, ret = 0;
12760 pf_q = vsi->base_queue + queue_pair;
12761 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
12762 false /*is xdp*/, enable);
12763 if (ret) {
12764 dev_info(&pf->pdev->dev,
12765 "VSI seid %d Tx ring %d %sable timeout\n",
12766 vsi->seid, pf_q, (enable ? "en" : "dis"));
12767 return ret;
12768 }
12770 i40e_control_rx_q(pf, pf_q, enable);
12771 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
12772 if (ret) {
12773 dev_info(&pf->pdev->dev,
12774 "VSI seid %d Rx ring %d %sable timeout\n",
12775 vsi->seid, pf_q, (enable ? "en" : "dis"));
12776 return ret;
12777 }
12779 /* Due to HW errata, on Rx disable only, the register can
12780 * indicate done before it really is. Needs 50ms to be sure
12781 */
12782 if (!enable)
12783 mdelay(50);
12785 if (!i40e_enabled_xdp_vsi(vsi))
12786 return ret;
12788 ret = i40e_control_wait_tx_q(vsi->seid, pf,
12789 pf_q + vsi->alloc_queue_pairs,
12790 true /*is xdp*/, enable);
12792 dev_info(&pf->pdev->dev,
12793 "VSI seid %d XDP Tx ring %d %sable timeout\n",
12794 vsi->seid, pf_q, (enable ? "en" : "dis"));
12801 * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
12803 * @queue_pair: queue pair
12805 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
12807 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12808 struct i40e_pf *pf = vsi->back;
12809 struct i40e_hw *hw = &pf->hw;
12811 /* All rings in a qp belong to the same qvector. */
12812 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
12813 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
12815 i40e_irq_dynamic_enable_icr0(pf);
12821 * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
12823 * @queue_pair: queue pair
12825 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
12827 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12828 struct i40e_pf *pf = vsi->back;
12829 struct i40e_hw *hw = &pf->hw;
12831 /* For simplicity, instead of removing the qp interrupt causes
12832 * from the interrupt linked list, we simply disable the interrupt, and
12833 * leave the list intact.
12835 * All rings in a qp belong to the same qvector.
12837 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
12838 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
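/* MSI-X vector 0 is the misc/other-cause interrupt; the
 * PFINT_DYN_CTLN registers cover only the queue vectors, which is
 * why the register index below is the vector number minus one.
 */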
12840 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
12842 synchronize_irq(pf->msix_entries[intpf].vector);
12844 /* Legacy and MSI mode - this stops all interrupt handling */
12845 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
12846 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
12848 synchronize_irq(pf->pdev->irq);
12853 * i40e_queue_pair_disable - Disables a queue pair
12855 * @queue_pair: queue pair
12857 * Returns 0 on success, <0 on failure.
12859 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
12863 err = i40e_enter_busy_conf(vsi);
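/* Tear down in a safe order: quiesce the interrupt first, then stop
 * the rings, then NAPI, and only then clean the rings, so no context
 * can touch the descriptors while they are being freed.
 */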
12867 i40e_queue_pair_disable_irq(vsi, queue_pair);
12868 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
12869 i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
12870 i40e_queue_pair_clean_rings(vsi, queue_pair);
12871 i40e_queue_pair_reset_stats(vsi, queue_pair);
12877 * i40e_queue_pair_enable - Enables a queue pair
12879 * @queue_pair: queue pair
12881 * Returns 0 on success, <0 on failure.
12883 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
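/* Mirror image of i40e_queue_pair_disable(): reprogram every ring of
 * the pair in hardware before re-enabling rings, NAPI and interrupts.
 */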
12887 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
12891 if (i40e_enabled_xdp_vsi(vsi)) {
12892 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
12897 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
12901 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
12902 i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
12903 i40e_queue_pair_enable_irq(vsi, queue_pair);
12905 i40e_exit_busy_conf(vsi);
12911 * i40e_xdp - implements ndo_bpf for i40e
12913 * @xdp: XDP command
12915 static int i40e_xdp(struct net_device *dev,
12916 struct netdev_bpf *xdp)
12918 struct i40e_netdev_priv *np = netdev_priv(dev);
12919 struct i40e_vsi *vsi = np->vsi;
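/* XDP is only supported on the main LAN VSI, not on VMDq, SR-IOV or
 * other special-purpose VSIs; the check below rejects everything else.
 */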
12921 if (vsi->type != I40E_VSI_MAIN)
12924 switch (xdp->command) {
12925 case XDP_SETUP_PROG:
12926 return i40e_xdp_setup(vsi, xdp->prog);
12927 case XDP_SETUP_XSK_POOL:
12928 return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
12929 xdp->xsk.queue_id);
12935 static const struct net_device_ops i40e_netdev_ops = {
12936 .ndo_open = i40e_open,
12937 .ndo_stop = i40e_close,
12938 .ndo_start_xmit = i40e_lan_xmit_frame,
12939 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
12940 .ndo_set_rx_mode = i40e_set_rx_mode,
12941 .ndo_validate_addr = eth_validate_addr,
12942 .ndo_set_mac_address = i40e_set_mac,
12943 .ndo_change_mtu = i40e_change_mtu,
12944 .ndo_do_ioctl = i40e_ioctl,
12945 .ndo_tx_timeout = i40e_tx_timeout,
12946 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
12947 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
12948 #ifdef CONFIG_NET_POLL_CONTROLLER
12949 .ndo_poll_controller = i40e_netpoll,
12951 .ndo_setup_tc = __i40e_setup_tc,
12952 .ndo_set_features = i40e_set_features,
12953 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
12954 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
12955 .ndo_get_vf_stats = i40e_get_vf_stats,
12956 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
12957 .ndo_get_vf_config = i40e_ndo_get_vf_config,
12958 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
12959 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
12960 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
12961 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
12962 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
12963 .ndo_get_phys_port_id = i40e_get_phys_port_id,
12964 .ndo_fdb_add = i40e_ndo_fdb_add,
12965 .ndo_features_check = i40e_features_check,
12966 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
12967 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
12968 .ndo_bpf = i40e_xdp,
12969 .ndo_xdp_xmit = i40e_xdp_xmit,
12970 .ndo_xsk_wakeup = i40e_xsk_wakeup,
12971 .ndo_dfwd_add_station = i40e_fwd_add,
12972 .ndo_dfwd_del_station = i40e_fwd_del,
12976 * i40e_config_netdev - Setup the netdev flags
12977 * @vsi: the VSI being configured
12979 * Returns 0 on success, negative value on failure
12981 static int i40e_config_netdev(struct i40e_vsi *vsi)
12983 struct i40e_pf *pf = vsi->back;
12984 struct i40e_hw *hw = &pf->hw;
12985 struct i40e_netdev_priv *np;
12986 struct net_device *netdev;
12987 u8 broadcast[ETH_ALEN];
12988 u8 mac_addr[ETH_ALEN];
12990 netdev_features_t hw_enc_features;
12991 netdev_features_t hw_features;
12993 etherdev_size = sizeof(struct i40e_netdev_priv);
12994 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
12998 vsi->netdev = netdev;
12999 np = netdev_priv(netdev);
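/* hw_enc_features lists the offloads that remain usable when frames
 * are carried inside a tunnel encapsulation; it also seeds the plain
 * hw_features set assembled further down.
 */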
13002 hw_enc_features = NETIF_F_SG |
13004 NETIF_F_IPV6_CSUM |
13006 NETIF_F_SOFT_FEATURES |
13011 NETIF_F_GSO_GRE_CSUM |
13012 NETIF_F_GSO_PARTIAL |
13013 NETIF_F_GSO_IPXIP4 |
13014 NETIF_F_GSO_IPXIP6 |
13015 NETIF_F_GSO_UDP_TUNNEL |
13016 NETIF_F_GSO_UDP_TUNNEL_CSUM |
13017 NETIF_F_GSO_UDP_L4 |
13023 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
13024 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
13026 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
13028 netdev->hw_enc_features |= hw_enc_features;
13030 /* record features VLANs can make use of */
13031 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
13033 /* enable macvlan offloads */
13034 netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
13036 hw_features = hw_enc_features |
13037 NETIF_F_HW_VLAN_CTAG_TX |
13038 NETIF_F_HW_VLAN_CTAG_RX;
13040 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
13041 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
13043 netdev->hw_features |= hw_features;
13045 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
13046 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
13048 if (vsi->type == I40E_VSI_MAIN) {
13049 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
13050 ether_addr_copy(mac_addr, hw->mac.perm_addr);
13051 /* The following steps are necessary for two reasons. First,
13052 * some older NVM configurations load a default MAC-VLAN
13053 * filter that will accept any tagged packet, and we want to
13054 * replace this with a normal filter. Additionally, it is
13055 * possible our MAC address was provided by the platform using
13056 * Open Firmware or similar.
13058 * Thus, we need to remove the default filter and install one
13059 * specific to the MAC address.
13061 i40e_rm_default_mac_filter(vsi, mac_addr);
13062 spin_lock_bh(&vsi->mac_filter_hash_lock);
13063 i40e_add_mac_filter(vsi, mac_addr);
13064 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13066 /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
13067 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
13068 * the end, which is 4 bytes long, so force truncation of the
13069 * original name by IFNAMSIZ - 4
13071 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
13073 pf->vsi[pf->lan_vsi]->netdev->name);
13074 eth_random_addr(mac_addr);
13076 spin_lock_bh(&vsi->mac_filter_hash_lock);
13077 i40e_add_mac_filter(vsi, mac_addr);
13078 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13081 /* Add the broadcast filter so that we initially will receive
13082 * broadcast packets. Note that when a new VLAN is first added the
13083 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
13084 * specific filters as part of transitioning into "vlan" operation.
13085 * When more VLANs are added, the driver will copy each existing MAC
13086 * filter and add it for the new VLAN.
13088 * Broadcast filters are handled specially by
13089 * i40e_sync_filters_subtask, as the driver must set the broadcast
13090 * promiscuous bit instead of adding this directly as a MAC/VLAN
13091 * filter. The subtask will update the correct broadcast promiscuous
13092 * bits as VLANs become active or inactive.
13094 eth_broadcast_addr(broadcast);
13095 spin_lock_bh(&vsi->mac_filter_hash_lock);
13096 i40e_add_mac_filter(vsi, broadcast);
13097 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13099 ether_addr_copy(netdev->dev_addr, mac_addr);
13100 ether_addr_copy(netdev->perm_addr, mac_addr);
13102 /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
13103 netdev->neigh_priv_len = sizeof(u32) * 4;
13105 netdev->priv_flags |= IFF_UNICAST_FLT;
13106 netdev->priv_flags |= IFF_SUPP_NOFCS;
13107 /* Setup netdev TC information */
13108 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
13110 netdev->netdev_ops = &i40e_netdev_ops;
13111 netdev->watchdog_timeo = 5 * HZ;
13112 i40e_set_ethtool_ops(netdev);
13114 /* MTU range: 68 - 9706 */
13115 netdev->min_mtu = ETH_MIN_MTU;
13116 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
13122 * i40e_vsi_delete - Delete a VSI from the switch
13123 * @vsi: the VSI being removed
13127 static void i40e_vsi_delete(struct i40e_vsi *vsi)
13129 /* removing the default VSI is not allowed */
13130 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
13133 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
13137 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
13138 * @vsi: the VSI being queried
13140 * Returns 1 if HW bridge mode is VEB and 0 in case of VEPA mode
13142 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
13144 struct i40e_veb *veb;
13145 struct i40e_pf *pf = vsi->back;
13147 /* Uplink is not a bridge so default to VEB */
13148 if (vsi->veb_idx >= I40E_MAX_VEB)
13151 veb = pf->veb[vsi->veb_idx];
13153 dev_info(&pf->pdev->dev,
13154 "There is no veb associated with the bridge\n");
13158 /* Uplink is a bridge in VEPA mode */
13159 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
13162 /* Uplink is a bridge in VEB mode */
13166 /* VEPA is now default bridge, so return 0 */
13171 * i40e_add_vsi - Add a VSI to the switch
13172 * @vsi: the VSI being configured
13174 * This initializes a VSI context depending on the VSI type to be added and
13175 * passes it down to the add_vsi aq command.
13177 static int i40e_add_vsi(struct i40e_vsi *vsi)
13180 struct i40e_pf *pf = vsi->back;
13181 struct i40e_hw *hw = &pf->hw;
13182 struct i40e_vsi_context ctxt;
13183 struct i40e_mac_filter *f;
13184 struct hlist_node *h;
13187 u8 enabled_tc = 0x1; /* TC0 enabled */
13190 memset(&ctxt, 0, sizeof(ctxt));
13191 switch (vsi->type) {
13192 case I40E_VSI_MAIN:
13193 /* The PF's main VSI is already setup as part of the
13194 * device initialization, so we'll not bother with
13195 * the add_vsi call, but we will retrieve the current VSI context. */
13198 ctxt.seid = pf->main_vsi_seid;
13199 ctxt.pf_num = pf->hw.pf_id;
13201 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
13202 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13204 dev_info(&pf->pdev->dev,
13205 "couldn't get PF vsi config, err %s aq_err %s\n",
13206 i40e_stat_str(&pf->hw, ret),
13207 i40e_aq_str(&pf->hw,
13208 pf->hw.aq.asq_last_status));
13211 vsi->info = ctxt.info;
13212 vsi->info.valid_sections = 0;
13214 vsi->seid = ctxt.seid;
13215 vsi->id = ctxt.vsi_number;
13217 enabled_tc = i40e_pf_get_tc_map(pf);
13219 /* Source pruning is enabled by default, so the flag is
13220 * negative logic - if it's set, we need to fiddle with
13221 * the VSI to disable source pruning.
13223 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
13224 memset(&ctxt, 0, sizeof(ctxt));
13225 ctxt.seid = pf->main_vsi_seid;
13226 ctxt.pf_num = pf->hw.pf_id;
13228 ctxt.info.valid_sections |=
13229 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13230 ctxt.info.switch_id =
13231 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
13232 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13234 dev_info(&pf->pdev->dev,
13235 "update vsi failed, err %s aq_err %s\n",
13236 i40e_stat_str(&pf->hw, ret),
13237 i40e_aq_str(&pf->hw,
13238 pf->hw.aq.asq_last_status));
13244 /* In MFP mode, set up the queue map and update the VSI */
13245 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
13246 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
13247 memset(&ctxt, 0, sizeof(ctxt));
13248 ctxt.seid = pf->main_vsi_seid;
13249 ctxt.pf_num = pf->hw.pf_id;
13251 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
13252 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13254 dev_info(&pf->pdev->dev,
13255 "update vsi failed, err %s aq_err %s\n",
13256 i40e_stat_str(&pf->hw, ret),
13257 i40e_aq_str(&pf->hw,
13258 pf->hw.aq.asq_last_status));
13262 /* update the local VSI info queue map */
13263 i40e_vsi_update_queue_map(vsi, &ctxt);
13264 vsi->info.valid_sections = 0;
13266 /* The Default/Main VSI is only enabled for TC0;
13267 * reconfigure it to enable all TCs that are
13268 * available on the port in SFP mode.
13269 * In the MFP case the iSCSI PF would use this
13270 * flow to enable LAN+iSCSI TC.
13272 ret = i40e_vsi_config_tc(vsi, enabled_tc);
13274 /* A single-TC condition is not fatal;
13275 * log a message and continue
13277 dev_info(&pf->pdev->dev,
13278 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
13280 i40e_stat_str(&pf->hw, ret),
13281 i40e_aq_str(&pf->hw,
13282 pf->hw.aq.asq_last_status));
13287 case I40E_VSI_FDIR:
13288 ctxt.pf_num = hw->pf_id;
13290 ctxt.uplink_seid = vsi->uplink_seid;
13291 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13292 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13293 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
13294 (i40e_is_vsi_uplink_mode_veb(vsi))) {
13295 ctxt.info.valid_sections |=
13296 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13297 ctxt.info.switch_id =
13298 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13300 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13303 case I40E_VSI_VMDQ2:
13304 ctxt.pf_num = hw->pf_id;
13306 ctxt.uplink_seid = vsi->uplink_seid;
13307 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13308 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
13310 /* This VSI is connected to VEB so the switch_id
13311 * should be set to zero by default.
13313 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13314 ctxt.info.valid_sections |=
13315 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13316 ctxt.info.switch_id =
13317 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13320 /* Setup the VSI tx/rx queue map for TC0 only for now */
13321 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13324 case I40E_VSI_SRIOV:
13325 ctxt.pf_num = hw->pf_id;
13326 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
13327 ctxt.uplink_seid = vsi->uplink_seid;
13328 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13329 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
13331 /* This VSI is connected to VEB so the switch_id
13332 * should be set to zero by default.
13334 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13335 ctxt.info.valid_sections |=
13336 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13337 ctxt.info.switch_id =
13338 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13341 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
13342 ctxt.info.valid_sections |=
13343 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
13344 ctxt.info.queueing_opt_flags |=
13345 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
13346 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
13349 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
13350 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
13351 if (pf->vf[vsi->vf_id].spoofchk) {
13352 ctxt.info.valid_sections |=
13353 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
13354 ctxt.info.sec_flags |=
13355 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
13356 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
13358 /* Setup the VSI tx/rx queue map for TC0 only for now */
13359 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13362 case I40E_VSI_IWARP:
13363 /* send down message to iWARP */
13370 if (vsi->type != I40E_VSI_MAIN) {
13371 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
13373 dev_info(&vsi->back->pdev->dev,
13374 "add vsi failed, err %s aq_err %s\n",
13375 i40e_stat_str(&pf->hw, ret),
13376 i40e_aq_str(&pf->hw,
13377 pf->hw.aq.asq_last_status));
13381 vsi->info = ctxt.info;
13382 vsi->info.valid_sections = 0;
13383 vsi->seid = ctxt.seid;
13384 vsi->id = ctxt.vsi_number;
13387 vsi->active_filters = 0;
13388 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
13389 spin_lock_bh(&vsi->mac_filter_hash_lock);
13390 /* If macvlan filters already exist, force them to get loaded */
13391 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
13392 f->state = I40E_FILTER_NEW;
13395 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13398 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
13399 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
13402 /* Update VSI BW information */
13403 ret = i40e_vsi_get_bw_info(vsi);
13405 dev_info(&pf->pdev->dev,
13406 "couldn't get vsi bw info, err %s aq_err %s\n",
13407 i40e_stat_str(&pf->hw, ret),
13408 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13409 /* VSI is already added so not tearing that up */
13418 * i40e_vsi_release - Delete a VSI and free its resources
13419 * @vsi: the VSI being removed
13421 * Returns 0 on success or < 0 on error
13423 int i40e_vsi_release(struct i40e_vsi *vsi)
13425 struct i40e_mac_filter *f;
13426 struct hlist_node *h;
13427 struct i40e_veb *veb = NULL;
13428 struct i40e_pf *pf;
13434 /* release of a VEB-owner or last VSI is not allowed */
13435 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
13436 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
13437 vsi->seid, vsi->uplink_seid);
13440 if (vsi == pf->vsi[pf->lan_vsi] &&
13441 !test_bit(__I40E_DOWN, pf->state)) {
13442 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
13446 uplink_seid = vsi->uplink_seid;
13447 if (vsi->type != I40E_VSI_SRIOV) {
13448 if (vsi->netdev_registered) {
13449 vsi->netdev_registered = false;
13451 /* results in a call to i40e_close() */
13452 unregister_netdev(vsi->netdev);
13455 i40e_vsi_close(vsi);
13457 i40e_vsi_disable_irq(vsi);
13460 spin_lock_bh(&vsi->mac_filter_hash_lock);
13462 /* clear the sync flag on all filters */
13464 __dev_uc_unsync(vsi->netdev, NULL);
13465 __dev_mc_unsync(vsi->netdev, NULL);
13468 /* make sure any remaining filters are marked for deletion */
13469 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
13470 __i40e_del_filter(vsi, f);
13472 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13474 i40e_sync_vsi_filters(vsi);
13476 i40e_vsi_delete(vsi);
13477 i40e_vsi_free_q_vectors(vsi);
13479 free_netdev(vsi->netdev);
13480 vsi->netdev = NULL;
13482 i40e_vsi_clear_rings(vsi);
13483 i40e_vsi_clear(vsi);
13485 /* If this was the last thing on the VEB, except for the
13486 * controlling VSI, remove the VEB, which puts the controlling
13487 * VSI onto the next level down in the switch.
13489 * Well, okay, there's one more exception here: don't remove
13490 * the orphan VEBs yet. We'll wait for an explicit remove request
13491 * from up the network stack.
13493 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
13495 pf->vsi[i]->uplink_seid == uplink_seid &&
13496 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13497 n++; /* count the VSIs */
13500 for (i = 0; i < I40E_MAX_VEB; i++) {
13503 if (pf->veb[i]->uplink_seid == uplink_seid)
13504 n++; /* count the VEBs */
13505 if (pf->veb[i]->seid == uplink_seid)
13508 if (n == 0 && veb && veb->uplink_seid != 0)
13509 i40e_veb_release(veb);
13515 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
13516 * @vsi: ptr to the VSI
13518 * This should only be called after i40e_vsi_mem_alloc() which allocates the
13519 * corresponding SW VSI structure and initializes num_queue_pairs for the
13520 * newly allocated VSI.
13522 * Returns 0 on success or negative on failure
13524 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
13527 struct i40e_pf *pf = vsi->back;
13529 if (vsi->q_vectors[0]) {
13530 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
13535 if (vsi->base_vector) {
13536 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
13537 vsi->seid, vsi->base_vector);
13541 ret = i40e_vsi_alloc_q_vectors(vsi);
13543 dev_info(&pf->pdev->dev,
13544 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
13545 vsi->num_q_vectors, vsi->seid, ret);
13546 vsi->num_q_vectors = 0;
13547 goto vector_setup_out;
13550 /* In Legacy mode, we do not have to get any other vector since we
13551 * piggyback on the misc/ICR0 for queue interrupts.
13553 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
13555 if (vsi->num_q_vectors)
13556 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
13557 vsi->num_q_vectors, vsi->idx);
13558 if (vsi->base_vector < 0) {
13559 dev_info(&pf->pdev->dev,
13560 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
13561 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
13562 i40e_vsi_free_q_vectors(vsi);
13564 goto vector_setup_out;
13572 * i40e_vsi_reinit_setup - release and reallocate resources for a VSI
13573 * @vsi: pointer to the vsi.
13575 * This re-allocates a vsi's queue resources.
13577 * Returns pointer to the successfully allocated and configured VSI sw struct
13578 * on success, otherwise returns NULL on failure.
13580 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
13582 u16 alloc_queue_pairs;
13583 struct i40e_pf *pf;
13592 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
13593 i40e_vsi_clear_rings(vsi);
13595 i40e_vsi_free_arrays(vsi, false);
13596 i40e_set_num_rings_in_vsi(vsi);
13597 ret = i40e_vsi_alloc_arrays(vsi, false);
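/* When XDP is enabled each queue pair owns an extra XDP Tx ring, so
 * reserve twice the queue pairs from the PF's queue pile.
 */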
13601 alloc_queue_pairs = vsi->alloc_queue_pairs *
13602 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
13604 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
13606 dev_info(&pf->pdev->dev,
13607 "failed to get tracking for %d queues for VSI %d err %d\n",
13608 alloc_queue_pairs, vsi->seid, ret);
13611 vsi->base_queue = ret;
13613 /* Update the FW view of the VSI. Force a reset of TC and queue
13614 * layout configurations.
13616 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
13617 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
13618 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
13619 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
13620 if (vsi->type == I40E_VSI_MAIN)
13621 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
13623 /* assign it some queues */
13624 ret = i40e_alloc_rings(vsi);
13628 /* map all of the rings to the q_vectors */
13629 i40e_vsi_map_rings_to_vectors(vsi);
13633 i40e_vsi_free_q_vectors(vsi);
13634 if (vsi->netdev_registered) {
13635 vsi->netdev_registered = false;
13636 unregister_netdev(vsi->netdev);
13637 free_netdev(vsi->netdev);
13638 vsi->netdev = NULL;
13640 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
13642 i40e_vsi_clear(vsi);
13647 * i40e_vsi_setup - Set up a VSI by a given type
13648 * @pf: board private structure
13650 * @uplink_seid: the switch element to link to
13651 * @param1: usage depends upon VSI type. For VF types, indicates VF id
13653 * This allocates the sw VSI structure and its queue resources, then adds a VSI
13654 * to the identified VEB.
13656 * Returns pointer to the successfully allocated and configured VSI sw struct on
13657 * success, otherwise returns NULL on failure.
13659 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
13660 u16 uplink_seid, u32 param1)
13662 struct i40e_vsi *vsi = NULL;
13663 struct i40e_veb *veb = NULL;
13664 u16 alloc_queue_pairs;
13668 /* The requested uplink_seid must be either
13669 * - the PF's port seid
13670 * no VEB is needed because this is the PF
13671 * or this is a Flow Director special case VSI
13672 * - seid of an existing VEB
13673 * - seid of a VSI that owns an existing VEB
13674 * - seid of a VSI that doesn't own a VEB
13675 * a new VEB is created and the VSI becomes the owner
13676 * - seid of the PF VSI, which is what creates the first VEB
13677 * this is a special case of the previous
13679 * Find which uplink_seid we were given and create a new VEB if needed
13681 for (i = 0; i < I40E_MAX_VEB; i++) {
13682 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
13688 if (!veb && uplink_seid != pf->mac_seid) {
13690 for (i = 0; i < pf->num_alloc_vsi; i++) {
13691 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
13697 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
13702 if (vsi->uplink_seid == pf->mac_seid)
13703 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
13704 vsi->tc_config.enabled_tc);
13705 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
13706 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
13707 vsi->tc_config.enabled_tc);
13709 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
13710 dev_info(&vsi->back->pdev->dev,
13711 "New VSI creation error, uplink seid of LAN VSI expected.\n");
13714 /* We come up by default in VEPA mode if SRIOV is not
13715 * already enabled, in which case we can't force VEPA mode. */
13718 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
13719 veb->bridge_mode = BRIDGE_MODE_VEPA;
13720 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
13722 i40e_config_bridge_mode(veb);
13724 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
13725 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
13729 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
13733 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
13734 uplink_seid = veb->seid;
13737 /* get vsi sw struct */
13738 v_idx = i40e_vsi_mem_alloc(pf, type);
13741 vsi = pf->vsi[v_idx];
13745 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
13747 if (type == I40E_VSI_MAIN)
13748 pf->lan_vsi = v_idx;
13749 else if (type == I40E_VSI_SRIOV)
13750 vsi->vf_id = param1;
13751 /* assign it some queues */
13752 alloc_queue_pairs = vsi->alloc_queue_pairs *
13753 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
13755 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
13757 dev_info(&pf->pdev->dev,
13758 "failed to get tracking for %d queues for VSI %d err=%d\n",
13759 alloc_queue_pairs, vsi->seid, ret);
13762 vsi->base_queue = ret;
13764 /* get a VSI from the hardware */
13765 vsi->uplink_seid = uplink_seid;
13766 ret = i40e_add_vsi(vsi);
13770 switch (vsi->type) {
13771 /* setup the netdev if needed */
13772 case I40E_VSI_MAIN:
13773 case I40E_VSI_VMDQ2:
13774 ret = i40e_config_netdev(vsi);
13777 ret = register_netdev(vsi->netdev);
13780 vsi->netdev_registered = true;
13781 netif_carrier_off(vsi->netdev);
13782 #ifdef CONFIG_I40E_DCB
13783 /* Setup DCB netlink interface */
13784 i40e_dcbnl_setup(vsi);
13785 #endif /* CONFIG_I40E_DCB */
13787 case I40E_VSI_FDIR:
13788 /* set up vectors and rings if needed */
13789 ret = i40e_vsi_setup_vectors(vsi);
13793 ret = i40e_alloc_rings(vsi);
13797 /* map all of the rings to the q_vectors */
13798 i40e_vsi_map_rings_to_vectors(vsi);
13800 i40e_vsi_reset_stats(vsi);
13803 /* no netdev or rings for the other VSI types */
13807 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
13808 (vsi->type == I40E_VSI_VMDQ2)) {
13809 ret = i40e_vsi_config_rss(vsi);
13814 i40e_vsi_free_q_vectors(vsi);
13816 if (vsi->netdev_registered) {
13817 vsi->netdev_registered = false;
13818 unregister_netdev(vsi->netdev);
13819 free_netdev(vsi->netdev);
13820 vsi->netdev = NULL;
13823 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
13825 i40e_vsi_clear(vsi);
13831 * i40e_veb_get_bw_info - Query VEB BW information
13832 * @veb: the veb to query
13834 * Query the Tx scheduler BW configuration data for a given VEB
13836 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
13838 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
13839 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
13840 struct i40e_pf *pf = veb->pf;
13841 struct i40e_hw *hw = &pf->hw;
13846 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
13849 dev_info(&pf->pdev->dev,
13850 "query veb bw config failed, err %s aq_err %s\n",
13851 i40e_stat_str(&pf->hw, ret),
13852 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
13856 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
13859 dev_info(&pf->pdev->dev,
13860 "query veb bw ets config failed, err %s aq_err %s\n",
13861 i40e_stat_str(&pf->hw, ret),
13862 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
13866 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
13867 veb->bw_max_quanta = ets_data.tc_bw_max;
13868 veb->is_abs_credits = bw_data.absolute_credits_enable;
13869 veb->enabled_tc = ets_data.tc_valid_bits;
13870 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
13871 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
13872 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
13873 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
13874 veb->bw_tc_limit_credits[i] =
13875 le16_to_cpu(bw_data.tc_bw_limits[i]);
13876 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
13884 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
13885 * @pf: board private structure
13887 * On error: returns error code (negative)
13888 * On success: returns veb index in PF (positive)
13890 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
13893 struct i40e_veb *veb;
13896 /* Need to protect the allocation of switch elements at the PF level */
13897 mutex_lock(&pf->switch_mutex);
13899 /* VEB list may be fragmented if VEB creation/destruction has
13900 * been happening. We can afford to do a quick scan to look
13901 * for any free slots in the list.
13903 * find next empty veb slot, looping back around if necessary
13906 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
13908 if (i >= I40E_MAX_VEB) {
13910 goto err_alloc_veb; /* out of VEB slots! */
13913 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
13916 goto err_alloc_veb;
13920 veb->enabled_tc = 1;
13925 mutex_unlock(&pf->switch_mutex);
13930 * i40e_switch_branch_release - Delete a branch of the switch tree
13931 * @branch: where to start deleting
13933 * This uses recursion to find the tips of the branch to be
13934 * removed, deleting until we get back to and can delete this VEB.
13936 static void i40e_switch_branch_release(struct i40e_veb *branch)
13938 struct i40e_pf *pf = branch->pf;
13939 u16 branch_seid = branch->seid;
13940 u16 veb_idx = branch->idx;
13943 /* release any VEBs on this VEB - RECURSION */
13944 for (i = 0; i < I40E_MAX_VEB; i++) {
13947 if (pf->veb[i]->uplink_seid == branch->seid)
13948 i40e_switch_branch_release(pf->veb[i]);
13951 /* Release the VSIs on this VEB, but not the owner VSI.
13953 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
13954 * the VEB itself, so don't use (*branch) after this loop.
13956 for (i = 0; i < pf->num_alloc_vsi; i++) {
13959 if (pf->vsi[i]->uplink_seid == branch_seid &&
13960 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13961 i40e_vsi_release(pf->vsi[i]);
13965 /* There's one corner case where the VEB might not have been
13966 * removed, so double check it here and remove it if needed.
13967 * This case happens if the veb was created from the debugfs
13968 * commands and no VSIs were added to it.
13970 if (pf->veb[veb_idx])
13971 i40e_veb_release(pf->veb[veb_idx]);
13975 * i40e_veb_clear - remove veb struct
13976 * @veb: the veb to remove
13978 static void i40e_veb_clear(struct i40e_veb *veb)
13984 struct i40e_pf *pf = veb->pf;
13986 mutex_lock(&pf->switch_mutex);
13987 if (pf->veb[veb->idx] == veb)
13988 pf->veb[veb->idx] = NULL;
13989 mutex_unlock(&pf->switch_mutex);
13996 * i40e_veb_release - Delete a VEB and free its resources
13997 * @veb: the VEB being removed
13999 void i40e_veb_release(struct i40e_veb *veb)
14001 struct i40e_vsi *vsi = NULL;
14002 struct i40e_pf *pf;
14007 /* find the remaining VSI and check for extras */
14008 for (i = 0; i < pf->num_alloc_vsi; i++) {
14009 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
14015 dev_info(&pf->pdev->dev,
14016 "can't remove VEB %d with %d VSIs left\n",
14021 /* move the remaining VSI to uplink veb */
14022 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
14023 if (veb->uplink_seid) {
14024 vsi->uplink_seid = veb->uplink_seid;
14025 if (veb->uplink_seid == pf->mac_seid)
14026 vsi->veb_idx = I40E_NO_VEB;
14028 vsi->veb_idx = veb->veb_idx;
14031 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
14032 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
14035 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14036 i40e_veb_clear(veb);
14040 * i40e_add_veb - create the VEB in the switch
14041 * @veb: the VEB to be instantiated
14042 * @vsi: the controlling VSI
14044 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
14046 struct i40e_pf *pf = veb->pf;
14047 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
14050 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
14051 veb->enabled_tc, false,
14052 &veb->seid, enable_stats, NULL);
14054 /* get a VEB from the hardware */
14056 dev_info(&pf->pdev->dev,
14057 "couldn't add VEB, err %s aq_err %s\n",
14058 i40e_stat_str(&pf->hw, ret),
14059 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14063 /* get statistics counter */
14064 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
14065 &veb->stats_idx, NULL, NULL, NULL);
14067 dev_info(&pf->pdev->dev,
14068 "couldn't get VEB statistics idx, err %s aq_err %s\n",
14069 i40e_stat_str(&pf->hw, ret),
14070 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14073 ret = i40e_veb_get_bw_info(veb);
14075 dev_info(&pf->pdev->dev,
14076 "couldn't get VEB bw info, err %s aq_err %s\n",
14077 i40e_stat_str(&pf->hw, ret),
14078 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14079 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14083 vsi->uplink_seid = veb->seid;
14084 vsi->veb_idx = veb->idx;
14085 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14091 * i40e_veb_setup - Set up a VEB
14092 * @pf: board private structure
14093 * @flags: VEB setup flags
14094 * @uplink_seid: the switch element to link to
14095 * @vsi_seid: the initial VSI seid
14096 * @enabled_tc: Enabled TC bit-map
14098 * This allocates the sw VEB structure and links it into the switch.
14099 * It is possible and legal for this to be a duplicate of an already
14100 * existing VEB. It is also possible for both uplink and vsi seids
14101 * to be zero, in order to create a floating VEB.
14103 * Returns pointer to the successfully allocated VEB sw struct on
14104 * success, otherwise returns NULL on failure.
14106 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
14107 u16 uplink_seid, u16 vsi_seid,
14110 struct i40e_veb *veb, *uplink_veb = NULL;
14111 int vsi_idx, veb_idx;
14114 /* if one seid is 0, the other must be 0 to create a floating relay */
14115 if ((uplink_seid == 0 || vsi_seid == 0) &&
14116 (uplink_seid + vsi_seid != 0)) {
14117 dev_info(&pf->pdev->dev,
14118 "one, not both seid's are 0: uplink=%d vsi=%d\n",
14119 uplink_seid, vsi_seid);
14123 /* make sure there is such a vsi and uplink */
14124 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
14125 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
14127 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
14128 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
14133 if (uplink_seid && uplink_seid != pf->mac_seid) {
14134 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
14135 if (pf->veb[veb_idx] &&
14136 pf->veb[veb_idx]->seid == uplink_seid) {
14137 uplink_veb = pf->veb[veb_idx];
14142 dev_info(&pf->pdev->dev,
14143 "uplink seid %d not found\n", uplink_seid);
14148 /* get veb sw struct */
14149 veb_idx = i40e_veb_mem_alloc(pf);
14152 veb = pf->veb[veb_idx];
14153 veb->flags = flags;
14154 veb->uplink_seid = uplink_seid;
14155 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
14156 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
14158 /* create the VEB in the switch */
14159 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
14162 if (vsi_idx == pf->lan_vsi)
14163 pf->lan_veb = veb->idx;
14168 i40e_veb_clear(veb);
14174 * i40e_setup_pf_switch_element - set PF vars based on switch type
14175 * @pf: board private structure
14176 * @ele: element we are building info from
14177 * @num_reported: total number of elements
14178 * @printconfig: should we print the contents
14180 * helper function to assist in extracting a few useful SEID values.
14182 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
14183 struct i40e_aqc_switch_config_element_resp *ele,
14184 u16 num_reported, bool printconfig)
14186 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
14187 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
14188 u8 element_type = ele->element_type;
14189 u16 seid = le16_to_cpu(ele->seid);
14192 dev_info(&pf->pdev->dev,
14193 "type=%d seid=%d uplink=%d downlink=%d\n",
14194 element_type, seid, uplink_seid, downlink_seid);
14196 switch (element_type) {
14197 case I40E_SWITCH_ELEMENT_TYPE_MAC:
14198 pf->mac_seid = seid;
14200 case I40E_SWITCH_ELEMENT_TYPE_VEB:
14202 if (uplink_seid != pf->mac_seid)
14204 if (pf->lan_veb >= I40E_MAX_VEB) {
14207 /* find existing or else empty VEB */
14208 for (v = 0; v < I40E_MAX_VEB; v++) {
14209 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
14214 if (pf->lan_veb >= I40E_MAX_VEB) {
14215 v = i40e_veb_mem_alloc(pf);
14221 if (pf->lan_veb >= I40E_MAX_VEB)
14224 pf->veb[pf->lan_veb]->seid = seid;
14225 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
14226 pf->veb[pf->lan_veb]->pf = pf;
14227 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
14229 case I40E_SWITCH_ELEMENT_TYPE_VSI:
14230 if (num_reported != 1)
14232 /* This is immediately after a reset so we can assume this is the PF's VSI */
14235 pf->mac_seid = uplink_seid;
14236 pf->pf_seid = downlink_seid;
14237 pf->main_vsi_seid = seid;
14239 dev_info(&pf->pdev->dev,
14240 "pf_seid=%d main_vsi_seid=%d\n",
14241 pf->pf_seid, pf->main_vsi_seid);
14243 case I40E_SWITCH_ELEMENT_TYPE_PF:
14244 case I40E_SWITCH_ELEMENT_TYPE_VF:
14245 case I40E_SWITCH_ELEMENT_TYPE_EMP:
14246 case I40E_SWITCH_ELEMENT_TYPE_BMC:
14247 case I40E_SWITCH_ELEMENT_TYPE_PE:
14248 case I40E_SWITCH_ELEMENT_TYPE_PA:
14249 /* ignore these for now */
14252 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
14253 element_type, seid);
14259 * i40e_fetch_switch_configuration - Get switch config from firmware
14260 * @pf: board private structure
14261 * @printconfig: should we print the contents
14263 * Get the current switch configuration from the device and
14264 * extract a few useful SEID values.
14266 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
14268 struct i40e_aqc_get_switch_config_resp *sw_config;
14274 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
14278 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
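/* The switch configuration may not fit in a single AQ buffer; the
 * firmware hands back a continuation seid, so keep fetching until
 * next_seid comes back as zero.
 */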
14280 u16 num_reported, num_total;
14282 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
14286 dev_info(&pf->pdev->dev,
14287 "get switch config failed err %s aq_err %s\n",
14288 i40e_stat_str(&pf->hw, ret),
14289 i40e_aq_str(&pf->hw,
14290 pf->hw.aq.asq_last_status));
14295 num_reported = le16_to_cpu(sw_config->header.num_reported);
14296 num_total = le16_to_cpu(sw_config->header.num_total);
14299 dev_info(&pf->pdev->dev,
14300 "header: %d reported %d total\n",
14301 num_reported, num_total);
14303 for (i = 0; i < num_reported; i++) {
14304 struct i40e_aqc_switch_config_element_resp *ele =
14305 &sw_config->element[i];
14307 i40e_setup_pf_switch_element(pf, ele, num_reported,
14310 } while (next_seid != 0);
14317 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
14318 * @pf: board private structure
14319 * @reinit: if the Main VSI needs to be re-initialized.
14321 * Returns 0 on success, negative value on failure
14323 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
14328 /* find out what's out there already */
14329 ret = i40e_fetch_switch_configuration(pf, false);
14331 dev_info(&pf->pdev->dev,
14332 "couldn't fetch switch config, err %s aq_err %s\n",
14333 i40e_stat_str(&pf->hw, ret),
14334 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14337 i40e_pf_reset_stats(pf);
14339 /* set the switch config bit for the whole device to
14340 * support limited promisc or true promisc
14341 * when user requests promisc. The default is limited promisc. */
14345 if ((pf->hw.pf_id == 0) &&
14346 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
14347 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14348 pf->last_sw_conf_flags = flags;
14351 if (pf->hw.pf_id == 0) {
14354 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14355 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
14357 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
14358 dev_info(&pf->pdev->dev,
14359 "couldn't set switch config bits, err %s aq_err %s\n",
14360 i40e_stat_str(&pf->hw, ret),
14361 i40e_aq_str(&pf->hw,
14362 pf->hw.aq.asq_last_status));
14363 /* not a fatal problem, just keep going */
14365 pf->last_sw_conf_valid_flags = valid_flags;
14368 /* first time setup */
14369 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
14370 struct i40e_vsi *vsi = NULL;
14373 /* Set up the PF VSI associated with the PF's main VSI
14374 * that is already in the HW switch
14376 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
14377 uplink_seid = pf->veb[pf->lan_veb]->seid;
14379 uplink_seid = pf->mac_seid;
14380 if (pf->lan_vsi == I40E_NO_VSI)
14381 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
14383 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
14385 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
14386 i40e_cloud_filter_exit(pf);
14387 i40e_fdir_teardown(pf);
14391 /* force a reset of TC and queue layout configurations */
14392 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14394 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14395 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14396 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14398 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
14400 i40e_fdir_sb_setup(pf);
14402 /* Setup static PF queue filter control settings */
14403 ret = i40e_setup_pf_filter_control(pf);
14405 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
14407 /* Failure here should not stop the remaining setup steps */
14410 /* enable RSS in the HW, even for only one queue, as the stack can use the hash */
14413 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
14414 i40e_pf_config_rss(pf);
14416 /* fill in link information and enable LSE reporting */
14417 i40e_link_event(pf);
14419 /* Initialize user-specific link properties */
14420 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
14421 I40E_AQ_AN_COMPLETED) ? true : false);
14425 /* repopulate tunnel port filters */
14426 i40e_sync_udp_filters(pf);
14432 * i40e_determine_queue_usage - Work out queue distribution
14433 * @pf: board private structure
14435 static void i40e_determine_queue_usage(struct i40e_pf *pf)
14440 pf->num_lan_qps = 0;
14442 /* Find the max queues to be put into basic use. We'll always be
14443 * using TC0, whether or not DCB is running, and TC0 will get the bulk of the queues. */
14446 queues_left = pf->hw.func_caps.num_tx_qp;
14448 if ((queues_left == 1) ||
14449 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
14450 /* one qp for PF, no queues for anything else */
14452 pf->alloc_rss_size = pf->num_lan_qps = 1;
14454 /* make sure all the fancies are disabled */
14455 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
14456 I40E_FLAG_IWARP_ENABLED |
14457 I40E_FLAG_FD_SB_ENABLED |
14458 I40E_FLAG_FD_ATR_ENABLED |
14459 I40E_FLAG_DCB_CAPABLE |
14460 I40E_FLAG_DCB_ENABLED |
14461 I40E_FLAG_SRIOV_ENABLED |
14462 I40E_FLAG_VMDQ_ENABLED);
14463 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14464 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
14465 I40E_FLAG_FD_SB_ENABLED |
14466 I40E_FLAG_FD_ATR_ENABLED |
14467 I40E_FLAG_DCB_CAPABLE))) {
14468 /* one qp for PF */
14469 pf->alloc_rss_size = pf->num_lan_qps = 1;
14470 queues_left -= pf->num_lan_qps;
14472 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
14473 I40E_FLAG_IWARP_ENABLED |
14474 I40E_FLAG_FD_SB_ENABLED |
14475 I40E_FLAG_FD_ATR_ENABLED |
14476 I40E_FLAG_DCB_ENABLED |
14477 I40E_FLAG_VMDQ_ENABLED);
14478 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14480 /* Not enough queues for all TCs */
14481 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
14482 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
14483 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
14484 I40E_FLAG_DCB_ENABLED);
14485 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
14488 /* limit lan qps to the smaller of qps, cpus or msix */
14489 q_max = max_t(int, pf->rss_size_max, num_online_cpus());
14490 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
14491 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
14492 pf->num_lan_qps = q_max;
14494 queues_left -= pf->num_lan_qps;
14497 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
14498 if (queues_left > 1) {
14499 queues_left -= 1; /* save 1 queue for FD */
14501 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
14502 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14503 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
14507 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
14508 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
14509 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
14510 (queues_left / pf->num_vf_qps));
14511 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
14514 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
14515 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
14516 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
14517 (queues_left / pf->num_vmdq_qps));
14518 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
14521 pf->queues_left = queues_left;
14522 dev_dbg(&pf->pdev->dev,
14523 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
14524 pf->hw.func_caps.num_tx_qp,
14525 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
14526 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
14527 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
14532 * i40e_setup_pf_filter_control - Setup PF static filter control
14533 * @pf: PF to be setup
14535 * i40e_setup_pf_filter_control sets up a PF's initial filter control
14536 * settings. If PE/FCoE are enabled then it will also set the per-PF
14537 * filter sizes required for them. It also enables Flow Director,
14538 * ethertype and macvlan type filter settings for the PF.
14540 * Returns 0 on success, negative on failure
14542 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
14544 struct i40e_filter_control_settings *settings = &pf->filter_settings;
14546 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
14548 /* Flow Director is enabled */
14549 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
14550 settings->enable_fdir = true;
14552 /* Ethtype and MACVLAN filters enabled for PF */
14553 settings->enable_ethtype = true;
14554 settings->enable_macvlan = true;
14556 if (i40e_set_filter_control(&pf->hw, settings))
14562 #define INFO_STRING_LEN 255
14563 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
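/* REMAIN(i) is the space left in buf after i bytes have been written;
 * scnprintf() keeps the writes inside the buffer.
 */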
14564 static void i40e_print_features(struct i40e_pf *pf)
14566 struct i40e_hw *hw = &pf->hw;
14570 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
14574 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
14575 #ifdef CONFIG_PCI_IOV
14576 i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
14578 i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
14579 pf->hw.func_caps.num_vsis,
14580 pf->vsi[pf->lan_vsi]->num_queue_pairs);
14581 if (pf->flags & I40E_FLAG_RSS_ENABLED)
14582 i += scnprintf(&buf[i], REMAIN(i), " RSS");
14583 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
14584 i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
14585 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
14586 i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
14587 i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
14589 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
14590 i += scnprintf(&buf[i], REMAIN(i), " DCB");
14591 i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
14592 i += scnprintf(&buf[i], REMAIN(i), " Geneve");
14593 if (pf->flags & I40E_FLAG_PTP)
14594 i += scnprintf(&buf[i], REMAIN(i), " PTP");
14595 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
14596 i += scnprintf(&buf[i], REMAIN(i), " VEB");
14598 i += scnprintf(&buf[i], REMAIN(i), " VEPA");
14600 dev_info(&pf->pdev->dev, "%s\n", buf);
14602 WARN_ON(i > INFO_STRING_LEN);
14606 * i40e_get_platform_mac_addr - get platform-specific MAC address
14607 * @pdev: PCI device information struct
14608 * @pf: board private structure
14610 * Look up the MAC address for the device. First we'll try
14611 * eth_platform_get_mac_address, which will check Open Firmware, or arch
14612 * specific fallback. Otherwise, we'll default to the stored value in firmware.
14615 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
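/* eth_platform_get_mac_address() returns 0 on success, so a non-zero
 * return here means no platform-provided address was found and we fall
 * back to the MAC stored in the adapter's NVM.
 */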
14617 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
14618 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
14622 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
14623 * @fec_cfg: FEC option to set in flags
14624 * @flags: ptr to flags in which we set FEC option
14626 void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
14628 if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
14629 *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
14630 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
14631 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
14632 *flags |= I40E_FLAG_RS_FEC;
14633 *flags &= ~I40E_FLAG_BASE_R_FEC;
14635 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
14636 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
14637 *flags |= I40E_FLAG_BASE_R_FEC;
14638 *flags &= ~I40E_FLAG_RS_FEC;
14641 *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
14645 * i40e_check_recovery_mode - check if we are running transition firmware
14646 * @pf: board private structure
14648 * Check registers indicating the firmware runs in recovery mode. Sets the
14649 * appropriate driver state.
14651 * Returns true if the recovery mode was detected, false otherwise
14653 static bool i40e_check_recovery_mode(struct i40e_pf *pf)
14655 u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
14657 if (val & I40E_GL_FWSTS_FWS1B_MASK) {
14658 dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
14659 dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
14660 set_bit(__I40E_RECOVERY_MODE, pf->state);
14664 if (test_bit(__I40E_RECOVERY_MODE, pf->state))
14665 dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");
14671 * i40e_pf_loop_reset - perform reset in a loop.
14672 * @pf: board private structure
14674 * This function is useful when a NIC is about to enter recovery mode.
14675 * When a NIC's internal data structures are corrupted the NIC's
14676 * firmware is going to enter recovery mode.
14677 * Right after a POR it takes about 7 minutes for firmware to enter
14678 * recovery mode. Until that time a NIC is in some kind of intermediate
14679 * state. After that time period the NIC almost surely enters
14680 * recovery mode. The only way for a driver to detect the intermediate
14681 * state is to issue a series of PF resets and check the return values.
14682 * If a PF reset returns success then the firmware could be in recovery
14683 * mode, so the caller of this code needs to check for recovery mode
14684 * if this function returns success. There is a small chance that the
14685 * firmware will hang in the intermediate state forever.
14686 * Since waiting 7 minutes is quite a lot of time, this function waits
14687 * 10 seconds and then gives up by returning an error.
14689 * Return 0 on success, negative on failure.
14691 static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
14693 /* wait max 10 seconds for PF reset to succeed */
14694 const unsigned long time_end = jiffies + 10 * HZ;
14696 struct i40e_hw *hw = &pf->hw;
14699 ret = i40e_pf_reset(hw);
14700 while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
14701 usleep_range(10000, 20000);
14702 ret = i40e_pf_reset(hw);
14705 if (ret == I40E_SUCCESS)
14708 dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
14714 * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
14715 * @pf: board private structure
14717 * Check FW registers to determine if FW issued unexpected EMP Reset.
14718 * Every time an unexpected EMP Reset occurs, the FW increments
14719 * a counter of unexpected EMP Resets. When the counter reaches 10,
14720 * the FW should enter recovery mode.
14722 * Returns true if FW issued unexpected EMP Reset
14724 static bool i40e_check_fw_empr(struct i40e_pf *pf)
14726 const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
14727 I40E_GL_FWSTS_FWS1B_MASK;
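/* The FWS1B field holds the unexpected-EMPR counter; a value in the
 * range (EMPR_0, EMPR_10] means at least one unexpected EMP Reset has
 * been recorded by the firmware.
 */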
14728 return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
14729 (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
14733 * i40e_handle_resets - handle EMP resets and PF resets
14734 * @pf: board private structure
14736 * Handle both EMP resets and PF resets and conclude whether there are
14737 * any issues regarding these resets. If there are any issues then
14738 * generate a log entry.
14740 * Returns 0 if the NIC is healthy, or a negative value when there are issues
14743 static i40e_status i40e_handle_resets(struct i40e_pf *pf)
14745 const i40e_status pfr = i40e_pf_loop_reset(pf);
14746 const bool is_empr = i40e_check_fw_empr(pf);
14748 if (is_empr || pfr != I40E_SUCCESS)
14749 dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
14751 return is_empr ? I40E_ERR_RESET_FAILED : pfr;
14755 * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
14756 * @pf: board private structure
14757 * @hw: ptr to the hardware info
14759 * This function does a minimal setup of all subsystems needed for running recovery mode.
14762 * Returns 0 on success, negative on failure
14764 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
14766 struct i40e_vsi *vsi;
14770 pci_save_state(pf->pdev);
14772 /* set up periodic task facility */
14773 timer_setup(&pf->service_timer, i40e_service_timer, 0);
14774 pf->service_timer_period = HZ;
14776 INIT_WORK(&pf->service_task, i40e_service_task);
14777 clear_bit(__I40E_SERVICE_SCHED, pf->state);
14779 err = i40e_init_interrupt_scheme(pf);
14781 goto err_switch_setup;
14783 /* The number of VSIs reported by the FW is the minimum guaranteed
14784 * to us; HW supports far more and we share the remaining pool with
14785 * the other PFs. We allocate space for more than the guarantee with
14786 * the understanding that we might not get them all later.
14788 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
14789 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
14791 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
14793 /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
14794 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
14798 goto err_switch_setup;
14801 /* We allocate one VSI, which is needed as the absolute minimum
14802 * in order to register the netdev
14804 v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
14806 goto err_switch_setup;
14807 pf->lan_vsi = v_idx;
14808 vsi = pf->vsi[v_idx];
14810 goto err_switch_setup;
14811 vsi->alloc_queue_pairs = 1;
14812 err = i40e_config_netdev(vsi);
14814 goto err_switch_setup;
14815 err = register_netdev(vsi->netdev);
14817 goto err_switch_setup;
14818 vsi->netdev_registered = true;
14819 i40e_dbg_pf_init(pf);
14821 err = i40e_setup_misc_vector_for_recovery_mode(pf);
14823 goto err_switch_setup;
14825 /* tell the firmware that we're starting */
14826 i40e_send_version(pf);
14828 /* since everything's happy, start the service_task timer */
14829 mod_timer(&pf->service_timer,
14830 round_jiffies(jiffies + pf->service_timer_period));
14835 i40e_reset_interrupt_capability(pf);
14836 del_timer_sync(&pf->service_timer);
14837 i40e_shutdown_adminq(hw);
14838 iounmap(hw->hw_addr);
14839 pci_disable_pcie_error_reporting(pf->pdev);
14840 pci_release_mem_regions(pf->pdev);
14841 pci_disable_device(pf->pdev);

/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 wol_nvm_bits;
	u16 link_status;
	int err;
	u32 val;
	u32 i;
	u8 set_fc_aq_fail;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	/* set up pci connections */
	err = pci_request_mem_regions(pdev, i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup. This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->next_vsi = 0;
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, pf->state);

	hw = &pf->hw;
	hw->back = pf;

	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
				I40E_MAX_CSR_SPACE);
	/* We believe that the highest register to read is
	 * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size
	 * is not less than that before mapping to prevent a
	 * kernel panic.
	 */
	if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
		dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
			pf->ioremap_len);
		err = -ENOMEM;
		goto err_ioremap;
	}
	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 pf->ioremap_len, err);
		goto err_ioremap;
	}
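
	/* cache the PCI identity and bus topology in the hw struct for use
	 * by the shared code
	 */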
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;
	pf->instance = pfs_found;

	/* Select something other than the 802.1ad ethertype for the
	 * switch to use internally and drop on ingress.
	 */
	hw->switch_tag = 0xffff;
	hw->first_tag = ETH_P_8021AD;
	hw->second_tag = ETH_P_8021Q;

	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);
	INIT_LIST_HEAD(&pf->ddp_old_prof);

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	pf->msg_enable = netif_msg_init(debug,
					NETIF_MSG_DRV |
					NETIF_MSG_PROBE |
					NETIF_MSG_LINK);
	if (debug < -1)
		pf->hw.debug_mask = debug;

	/* do a special CORER for clearing PXE mode once at init */
	if (hw->revision_id == 0 &&
	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
		i40e_flush(hw);
		msleep(200);
		pf->corer_count++;

		i40e_clear_pxe_mode(hw);
	}

	/* Reset here to make sure all is clean and to define PF 'n' */
	i40e_clear_hw(hw);

	err = i40e_set_mac_type(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	err = i40e_handle_resets(pf);
	if (err)
		goto err_pf_reset;

	i40e_check_recovery_mode(pf);
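
	/* size the admin queue rings and buffers before i40e_init_adminq()
	 * brings the AQ up below
	 */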
	hw->aq.num_arq_entries = I40E_AQ_LEN;
	hw->aq.num_asq_entries = I40E_AQ_LEN;
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;

	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
		 "%s-%s:misc",
		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	/* set up a default setting for link flow control */
	pf->hw.fc.requested_mode = I40E_FC_NONE;

	err = i40e_init_adminq(hw);
	if (err) {
		if (err == I40E_ERR_FIRMWARE_API_VERSION)
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
				 hw->aq.api_maj_ver,
				 hw->aq.api_min_ver,
				 I40E_FW_API_VERSION_MAJOR,
				 I40E_FW_MINOR_VERSION(hw));
		else
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");

		goto err_pf_reset;
	}
	i40e_get_oem_version(hw);

	/* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
		 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
		 hw->subsystem_vendor_id, hw->subsystem_device_id);

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
		dev_info(&pdev->dev,
			 "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
			 hw->aq.api_maj_ver,
			 hw->aq.api_min_ver,
			 I40E_FW_API_VERSION_MAJOR,
			 I40E_FW_MINOR_VERSION(hw));
	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
		dev_info(&pdev->dev,
			 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
			 hw->aq.api_maj_ver,
			 hw->aq.api_min_ver,
			 I40E_FW_API_VERSION_MAJOR,
			 I40E_FW_MINOR_VERSION(hw));
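
	/* validate the NVM checksum; a failure sets __I40E_BAD_EEPROM, which
	 * the SR-IOV prep below checks before enabling VFs
	 */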
	i40e_verify_eeprom(pf);

	/* Rev 0 hardware was never productized */
	if (hw->revision_id < 1)
		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");

	i40e_clear_pxe_mode(hw);

	err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
		return i40e_init_recovery_mode(pf, hw);

	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}

	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this will fail
	 */
	if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, false, NULL);
	}

	/* allow a platform config to override the HW addr */
	i40e_get_platform_mac_addr(pdev, pf);

	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->hw_features |= I40E_HW_PORT_ID_VALID;

	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);

	dev_info(&pdev->dev,
		 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
			"FW LLDP is disabled\n" :
			"FW LLDP is enabled\n");

	/* Enable FW to write default DCB config on link-up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

#ifdef CONFIG_I40E_DCB
	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
		pf->wol_en = false;
	else
		pf->wol_en = true;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);

	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		if (pci_num_vf(pdev))
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
	}
#endif
	err = i40e_setup_pf_switch(pf, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}
	INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);

	/* Make sure flow control is set according to current settings */
	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_phy_cap\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on set_phy_config\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_link_info\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));

	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* The driver only wants link up/down and module qualification
	 * reports from firmware. Note the negative logic.
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */
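
	/* carve MSI-X vectors out of the PF's irq_pile for the iWARP (RDMA)
	 * client, if it is enabled
	 */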
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
						      pf->num_iwarp_msix,
						      I40E_IWARP_IRQ_PILE_ID);
		if (pf->iwarp_base_vector < 0) {
			dev_info(&pdev->dev,
				 "failed to get tracking for %d vectors for IWARP err=%d\n",
				 pf->num_iwarp_msix, pf->iwarp_base_vector);
			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
		}
	}

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	/* add this PF to client device list and launch a client service task */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		err = i40e_lan_add_device(pf);
		if (err)
			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
				 err);
	}

#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strlcpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strlcpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strlcpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strlcpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}

	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* set the FEC config due to the board capabilities */
	i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure the MFS hasn't been set lower than the default */
#define MAX_FRAME_SIZE_DEFAULT 0x2600
	val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
	       I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
	if (val < MAX_FRAME_SIZE_DEFAULT)
		dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
			 i, val);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
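
	/* let any in-flight reset recovery finish before tearing down */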
	while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		usleep_range(1000, 2000);

	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	if (pf->service_timer.function)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);

	if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
		struct i40e_vsi *vsi = pf->vsi[0];

		/* We know that we have allocated only one vsi for this PF,
		 * it was just for registering netdevice, so the interface
		 * could be visible in the 'ifconfig' output
		 */
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);

		goto unmap;
	}

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	i40e_cloud_filter_exit(pf);

	/* remove attached clients */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		ret_code = i40e_lan_del_device(pf);
		if (ret_code)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 ret_code);
	}

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

unmap:
	/* Free MSI/legacy interrupt 0 when in recovery mode. */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
		free_irq(pf->pdev->irq, pf);

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
				i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}
	rtnl_unlock();

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}

/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the type of PCI error
 *
 * Called to warn that something happened and the error handling steps
 * are in progress. Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						pci_channel_state_t error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	if (!pf) {
		dev_info(&pdev->dev,
			 "Cannot recover - error happened during device probe\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		i40e_prep_for_reset(pf, false);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset. If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	u32 reg;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);
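
		/* a non-zero reset trigger register means a global reset is
		 * still pending, so the device can't be trusted yet
		 */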
		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	return result;
}

/**
 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_prep_for_reset(pf, false);
}

/**
 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_reset_and_rebuild(pf, false, false);
}

/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, pf->state))
		return;

	i40e_handle_reset_warning(pf, false);
}

/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin q function
 * @pf: pointer to i40e_pf struct
 **/
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 mac_addr[6];
	u16 flags = 0;

	/* Get current MAC address in case it's an LAA */
	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
		ether_addr_copy(mac_addr,
				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
	} else {
		dev_err(&pf->pdev->dev,
			"Failed to retrieve MAC address; using default\n");
		ether_addr_copy(mac_addr, hw->mac.addr);
	}

	/* The FW expects the mac address write cmd to first be called with
	 * one of these flags before calling it again with the multicast
	 * enable flags.
	 */
	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
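
	/* non-primary Flex10 partitions must leave the WoL configuration
	 * alone, so write only the LAA there
	 */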
	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;

	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
		return;
	}

	flags = I40E_AQC_MC_MAG_EN
			| I40E_AQC_WOL_PRESERVE_ON_PFR
			| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed to enable Multicast Magic Packet wake up\n");
}

/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_cloud_filter_exit(pf);
	i40e_fdir_teardown(pf);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf, false);
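
	/* arm (or disarm) the APM wake-up and magic-packet filter registers
	 * to match the configured WoL setting
	 */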
	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Free MSI/legacy interrupt 0 when in recovery mode. */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
		free_irq(pf->pdev->irq, pf);

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * flow.
	 */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	rtnl_unlock();

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * i40e_suspend - PM callback for moving to D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_suspend(struct device *dev)
{
	struct i40e_pf *pf = dev_get_drvdata(dev);
	struct i40e_hw *hw = &pf->hw;

	/* If we're already suspended, then there is nothing to do */
	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	set_bit(__I40E_DOWN, pf->state);

	/* Ensure service task will not be running */
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * flow.
	 */
	rtnl_lock();

	i40e_prep_for_reset(pf, true);

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Clear the interrupt scheme and release our IRQs so that the system
	 * can safely hibernate even when there are a large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	i40e_clear_interrupt_scheme(pf);

	rtnl_unlock();

	return 0;
}

/**
 * i40e_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_resume(struct device *dev)
{
	struct i40e_pf *pf = dev_get_drvdata(dev);
	int err;

	/* If we're not suspended, then there is nothing to do */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	/* We need to hold the RTNL lock prior to restoring interrupt schemes,
	 * since we're going to be restoring queues
	 */
	rtnl_lock();

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	err = i40e_restore_interrupt_scheme(pf);
	if (err) {
		dev_err(dev, "Cannot restore interrupt scheme: %d\n",
			err);
	}

	clear_bit(__I40E_DOWN, pf->state);
	i40e_reset_and_rebuild(pf, false, true);

	rtnl_unlock();

	/* Clear suspended state last after everything is recovered */
	clear_bit(__I40E_SUSPENDED, pf->state);

	/* Restart the service task */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;
}

static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.reset_prepare = i40e_pci_error_reset_prepare,
	.reset_done = i40e_pci_error_reset_done,
	.resume = i40e_pci_error_resume,
};
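
/* SIMPLE_DEV_PM_OPS binds i40e_suspend/i40e_resume to both the suspend and
 * hibernate transitions
 */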
static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);

static struct pci_driver i40e_driver = {
	.name     = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe    = i40e_probe,
	.remove   = i40e_remove,
	.driver   = {
		.pm = &i40e_pm_ops,
	},
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* There is no need to throttle the number of active tasks because
	 * each device limits its own task using a state bit for scheduling
	 * the service task, and the device tasks do not interfere with each
	 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
	 * since we need to be able to guarantee forward progress even under
	 * memory pressure.
	 */
	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);