1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
4 /******************************************************************************
5 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
6 ******************************************************************************/
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/types.h>
11 #include <linux/bitops.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/vmalloc.h>
16 #include <linux/string.h>
19 #include <linux/tcp.h>
20 #include <linux/sctp.h>
21 #include <linux/ipv6.h>
22 #include <linux/slab.h>
23 #include <net/checksum.h>
24 #include <net/ip6_checksum.h>
25 #include <linux/ethtool.h>
27 #include <linux/if_vlan.h>
28 #include <linux/prefetch.h>
30 #include <linux/bpf.h>
31 #include <linux/bpf_trace.h>
32 #include <linux/atomic.h>
36 const char ixgbevf_driver_name[] = "ixgbevf";
37 static const char ixgbevf_driver_string[] =
38 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
40 #define DRV_VERSION "4.1.0-k"
41 const char ixgbevf_driver_version[] = DRV_VERSION;
42 static char ixgbevf_copyright[] =
43 "Copyright (c) 2009 - 2018 Intel Corporation.";
45 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
46 [board_82599_vf] = &ixgbevf_82599_vf_info,
47 [board_82599_vf_hv] = &ixgbevf_82599_vf_hv_info,
48 [board_X540_vf] = &ixgbevf_X540_vf_info,
49 [board_X540_vf_hv] = &ixgbevf_X540_vf_hv_info,
50 [board_X550_vf] = &ixgbevf_X550_vf_info,
51 [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info,
52 [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
53 [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
	[board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
};
57 /* ixgbevf_pci_tbl - PCI Device ID Table
59 * Wildcard entries (PCI_ANY_ID) should come last
60 * Last entry must be all 0s
62 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
65 static const struct pci_device_id ixgbevf_pci_tbl[] = {
66 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
67 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
68 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
69 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
70 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
71 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
72 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
74 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
	/* required last entry */
	{ .device = 0 }
};
78 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
80 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
81 MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
82 MODULE_LICENSE("GPL v2");
83 MODULE_VERSION(DRV_VERSION);
85 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
86 static int debug = -1;
87 module_param(debug, int, 0);
88 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
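/* Example usage (illustrative): loading the module with
 * "modprobe ixgbevf debug=16" enables all message categories, while the
 * default of -1 lets the driver fall back to DEFAULT_MSG_ENABLE above.
 */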
90 static struct workqueue_struct *ixgbevf_wq;
92 static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
94 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
95 !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
96 !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
97 queue_work(ixgbevf_wq, &adapter->service_task);
100 static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
102 BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));
104 /* flush memory to make sure state is correct before next watchdog */
105 smp_mb__before_atomic();
106 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
110 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
111 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
112 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
113 static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer);
114 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
115 struct ixgbevf_rx_buffer *old_buff);
117 static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
119 struct ixgbevf_adapter *adapter = hw->back;
124 dev_err(&adapter->pdev->dev, "Adapter removed\n");
125 if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
126 ixgbevf_service_event_schedule(adapter);
129 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
	/* Skip the status-register read when the register that just failed
	 * was itself the status register; besides avoiding a redundant
	 * read, this also blocks any potential recursion.
	 */
139 if (reg == IXGBE_VFSTATUS) {
140 ixgbevf_remove_adapter(hw);
143 value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
144 if (value == IXGBE_FAILED_READ_REG)
145 ixgbevf_remove_adapter(hw);
148 u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
150 u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
153 if (IXGBE_REMOVED(reg_addr))
154 return IXGBE_FAILED_READ_REG;
155 value = readl(reg_addr + reg);
156 if (unlikely(value == IXGBE_FAILED_READ_REG))
157 ixgbevf_check_remove(hw, reg);
162 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
163 * @adapter: pointer to adapter struct
164 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
165 * @queue: queue to map the corresponding interrupt to
166 * @msix_vector: the vector to map to the corresponding queue
168 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
169 u8 queue, u8 msix_vector)
172 struct ixgbe_hw *hw = &adapter->hw;
174 if (direction == -1) {
176 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
177 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
180 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
182 /* Tx or Rx causes */
183 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
184 index = ((16 * (queue & 1)) + (8 * direction));
185 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
186 ivar &= ~(0xFF << index);
187 ivar |= (msix_vector << index);
188 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
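	/* Layout note: each VTIVAR register carries four one-byte entries
	 * (Rx and Tx causes for an even/odd queue pair), with
	 * IXGBE_IVAR_ALLOC_VAL acting as the "entry valid" bit.  Worked
	 * example (illustrative): queue 5, Tx cause -> index =
	 * 16 * (5 & 1) + 8 * 1 = 24, so the vector lands in bits 31:24 of
	 * VTIVAR(5 >> 1) = VTIVAR(2).
	 */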
192 static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
194 return ring->stats.packets;
197 static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
199 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
200 struct ixgbe_hw *hw = &adapter->hw;
202 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
203 u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));
206 return (head < tail) ?
207 tail - head : (tail + ring->count - head);
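	/* Worked example (illustrative): with ring->count = 512, a head of
	 * 500 and a tail of 10 the tail has wrapped, so the pending count
	 * is 10 + 512 - 500 = 22 descriptors not yet processed by hardware.
	 */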
212 static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
214 u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
215 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
216 u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);
218 clear_check_for_tx_hang(tx_ring);
220 /* Check for a hung queue, but be thorough. This verifies
221 * that a transmit has been completed since the previous
222 * check AND there is at least one packet pending. The
223 * ARMED bit is set to indicate a potential hang.
225 if ((tx_done_old == tx_done) && tx_pending) {
226 /* make sure it is true for two checks in a row */
227 return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
230 /* reset the countdown */
231 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);
233 /* update completed stats and continue */
234 tx_ring->tx_stats.tx_done_old = tx_done;
239 static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
241 /* Do the reset outside of interrupt context */
242 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
243 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
244 ixgbevf_service_event_schedule(adapter);
249 * ixgbevf_tx_timeout - Respond to a Tx Hang
250 * @netdev: network interface device structure
252 static void ixgbevf_tx_timeout(struct net_device *netdev)
254 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
256 ixgbevf_tx_timeout_reset(adapter);
260 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
262 * @tx_ring: tx ring to clean
263 * @napi_budget: Used to determine if we are in netpoll
265 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
266 struct ixgbevf_ring *tx_ring, int napi_budget)
268 struct ixgbevf_adapter *adapter = q_vector->adapter;
269 struct ixgbevf_tx_buffer *tx_buffer;
270 union ixgbe_adv_tx_desc *tx_desc;
271 unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
272 unsigned int budget = tx_ring->count / 2;
273 unsigned int i = tx_ring->next_to_clean;
275 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
278 tx_buffer = &tx_ring->tx_buffer_info[i];
279 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
283 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
285 /* if next_to_watch is not set then there is no work pending */
289 /* prevent any other reads prior to eop_desc */
292 /* if DD is not set pending work has not been completed */
293 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
296 /* clear next_to_watch to prevent false hangs */
297 tx_buffer->next_to_watch = NULL;
299 /* update the statistics for this packet */
300 total_bytes += tx_buffer->bytecount;
301 total_packets += tx_buffer->gso_segs;
302 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
306 if (ring_is_xdp(tx_ring))
307 page_frag_free(tx_buffer->data);
309 napi_consume_skb(tx_buffer->skb, napi_budget);
311 /* unmap skb header data */
312 dma_unmap_single(tx_ring->dev,
313 dma_unmap_addr(tx_buffer, dma),
314 dma_unmap_len(tx_buffer, len),
317 /* clear tx_buffer data */
318 dma_unmap_len_set(tx_buffer, len, 0);
320 /* unmap remaining buffers */
321 while (tx_desc != eop_desc) {
327 tx_buffer = tx_ring->tx_buffer_info;
328 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
331 /* unmap any remaining paged data */
332 if (dma_unmap_len(tx_buffer, len)) {
333 dma_unmap_page(tx_ring->dev,
334 dma_unmap_addr(tx_buffer, dma),
335 dma_unmap_len(tx_buffer, len),
337 dma_unmap_len_set(tx_buffer, len, 0);
341 /* move us one more past the eop_desc for start of next pkt */
347 tx_buffer = tx_ring->tx_buffer_info;
348 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
351 /* issue prefetch for next Tx descriptor */
354 /* update budget accounting */
356 } while (likely(budget));
359 tx_ring->next_to_clean = i;
360 u64_stats_update_begin(&tx_ring->syncp);
361 tx_ring->stats.bytes += total_bytes;
362 tx_ring->stats.packets += total_packets;
363 u64_stats_update_end(&tx_ring->syncp);
364 q_vector->tx.total_bytes += total_bytes;
365 q_vector->tx.total_packets += total_packets;
366 adapter->tx_ipsec += total_ipsec;
368 if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
369 struct ixgbe_hw *hw = &adapter->hw;
370 union ixgbe_adv_tx_desc *eop_desc;
372 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
374 pr_err("Detected Tx Unit Hang%s\n"
376 " TDH, TDT <%x>, <%x>\n"
377 " next_to_use <%x>\n"
378 " next_to_clean <%x>\n"
379 "tx_buffer_info[next_to_clean]\n"
380 " next_to_watch <%p>\n"
381 " eop_desc->wb.status <%x>\n"
382 " time_stamp <%lx>\n"
384 ring_is_xdp(tx_ring) ? " XDP" : "",
385 tx_ring->queue_index,
386 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
387 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
388 tx_ring->next_to_use, i,
389 eop_desc, (eop_desc ? eop_desc->wb.status : 0),
390 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
392 if (!ring_is_xdp(tx_ring))
393 netif_stop_subqueue(tx_ring->netdev,
394 tx_ring->queue_index);
396 /* schedule immediate reset if we believe we hung */
397 ixgbevf_tx_timeout_reset(adapter);
402 if (ring_is_xdp(tx_ring))
405 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
406 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
407 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
408 /* Make sure that anybody stopping the queue after this
409 * sees the new next_to_clean.
413 if (__netif_subqueue_stopped(tx_ring->netdev,
414 tx_ring->queue_index) &&
415 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
416 netif_wake_subqueue(tx_ring->netdev,
417 tx_ring->queue_index);
418 ++tx_ring->tx_stats.restart_queue;
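		/* The wake threshold is intentionally twice DESC_NEEDED
		 * (roughly, the driver's worst-case descriptor count for a
		 * single frame) so the queue is only restarted once there is
		 * enough slack to avoid stopping it again immediately.
		 */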
426 * ixgbevf_rx_skb - Helper function to determine proper Rx method
427 * @q_vector: structure containing interrupt and ring information
428 * @skb: packet to send up
430 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
433 napi_gro_receive(&q_vector->napi, skb);
436 #define IXGBE_RSS_L4_TYPES_MASK \
437 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
438 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
439 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
440 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
442 static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
443 union ixgbe_adv_rx_desc *rx_desc,
448 if (!(ring->netdev->features & NETIF_F_RXHASH))
451 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
452 IXGBE_RXDADV_RSSTYPE_MASK;
457 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
458 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
459 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
463 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
465 * @rx_desc: current Rx descriptor being processed
466 * @skb: skb currently being received and modified
468 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
469 union ixgbe_adv_rx_desc *rx_desc,
472 skb_checksum_none_assert(skb);
474 /* Rx csum disabled */
475 if (!(ring->netdev->features & NETIF_F_RXCSUM))
478 /* if IP and error */
479 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
480 ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
481 ring->rx_stats.csum_err++;
485 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
488 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
489 ring->rx_stats.csum_err++;
493 /* It must be a TCP or UDP packet with a valid checksum */
494 skb->ip_summed = CHECKSUM_UNNECESSARY;
498 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
499 * @rx_ring: rx descriptor ring packet is being transacted on
500 * @rx_desc: pointer to the EOP Rx descriptor
501 * @skb: pointer to current skb being populated
503 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 */
507 static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
508 union ixgbe_adv_rx_desc *rx_desc,
511 ixgbevf_rx_hash(rx_ring, rx_desc, skb);
512 ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
514 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
515 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
516 unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
518 if (test_bit(vid & VLAN_VID_MASK, active_vlans))
519 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
522 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
523 ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);
525 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
529 struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,
530 const unsigned int size)
532 struct ixgbevf_rx_buffer *rx_buffer;
534 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
535 prefetchw(rx_buffer->page);
537 /* we are reusing so sync this buffer for CPU use */
538 dma_sync_single_range_for_cpu(rx_ring->dev,
540 rx_buffer->page_offset,
544 rx_buffer->pagecnt_bias--;
549 static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
550 struct ixgbevf_rx_buffer *rx_buffer,
553 if (ixgbevf_can_reuse_rx_page(rx_buffer)) {
554 /* hand second half of page back to the ring */
555 ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
558 /* We are not reusing the buffer so unmap it and free
559 * any references we are holding to it
561 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
562 ixgbevf_rx_pg_size(rx_ring),
564 IXGBEVF_RX_DMA_ATTR);
565 __page_frag_cache_drain(rx_buffer->page,
566 rx_buffer->pagecnt_bias);
569 /* clear contents of rx_buffer */
570 rx_buffer->page = NULL;
574 * ixgbevf_is_non_eop - process handling of non-EOP buffers
575 * @rx_ring: Rx ring being processed
576 * @rx_desc: Rx descriptor for current buffer
578 * This function updates next to clean. If the buffer is an EOP buffer
579 * this function exits returning false, otherwise it will place the
580 * sk_buff in the next buffer to be chained and return true indicating
581 * that this is in fact a non-EOP buffer.
583 static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
584 union ixgbe_adv_rx_desc *rx_desc)
586 u32 ntc = rx_ring->next_to_clean + 1;
588 /* fetch, update, and store next to clean */
589 ntc = (ntc < rx_ring->count) ? ntc : 0;
590 rx_ring->next_to_clean = ntc;
592 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
594 if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
600 static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
602 return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
605 static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
606 struct ixgbevf_rx_buffer *bi)
608 struct page *page = bi->page;
611 /* since we are recycling buffers we should seldom need to alloc */
615 /* alloc new page for storage */
616 page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
617 if (unlikely(!page)) {
618 rx_ring->rx_stats.alloc_rx_page_failed++;
622 /* map page for use */
623 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
624 ixgbevf_rx_pg_size(rx_ring),
625 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
627 /* if mapping failed free memory back to system since
628 * there isn't much point in holding memory we can't use
630 if (dma_mapping_error(rx_ring->dev, dma)) {
631 __free_pages(page, ixgbevf_rx_pg_order(rx_ring));
633 rx_ring->rx_stats.alloc_rx_page_failed++;
639 bi->page_offset = ixgbevf_rx_offset(rx_ring);
640 bi->pagecnt_bias = 1;
641 rx_ring->rx_stats.alloc_rx_page++;
647 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
648 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
649 * @cleaned_count: number of buffers to replace
651 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
654 union ixgbe_adv_rx_desc *rx_desc;
655 struct ixgbevf_rx_buffer *bi;
656 unsigned int i = rx_ring->next_to_use;
658 /* nothing to do or no valid netdev defined */
659 if (!cleaned_count || !rx_ring->netdev)
662 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
663 bi = &rx_ring->rx_buffer_info[i];
667 if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
670 /* sync the buffer for use by the device */
671 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
673 ixgbevf_rx_bufsz(rx_ring),
676 /* Refresh the desc even if pkt_addr didn't change
677 * because each write-back erases this info.
679 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
685 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
686 bi = rx_ring->rx_buffer_info;
690 /* clear the length for the next_to_use descriptor */
691 rx_desc->wb.upper.length = 0;
694 } while (cleaned_count);
698 if (rx_ring->next_to_use != i) {
699 /* record the next descriptor to use */
700 rx_ring->next_to_use = i;
702 /* update next to alloc since we have filled the ring */
703 rx_ring->next_to_alloc = i;
705 /* Force memory writes to complete before letting h/w
706 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
711 ixgbevf_write_tail(rx_ring, i);
716 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
717 * @rx_ring: rx descriptor ring packet is being transacted on
718 * @rx_desc: pointer to the EOP Rx descriptor
719 * @skb: pointer to current skb being fixed
721 * Check for corrupted packet headers caused by senders on the local L2
722 * embedded NIC switch not setting up their Tx Descriptors right. These
723 * should be very rare.
725 * Also address the case where we are pulling data in on pages only
726 * and as such no data is present in the skb header.
728 * In addition if skb is not at least 60 bytes we need to pad it so that
729 * it is large enough to qualify as a valid Ethernet frame.
731 * Returns true if an error was encountered and skb was freed.
733 static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
734 union ixgbe_adv_rx_desc *rx_desc,
737 /* XDP packets use error pointer so abort at this point */
741 /* verify that the packet does not have any known errors */
742 if (unlikely(ixgbevf_test_staterr(rx_desc,
743 IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
744 struct net_device *netdev = rx_ring->netdev;
746 if (!(netdev->features & NETIF_F_RXALL)) {
747 dev_kfree_skb_any(skb);
752 /* if eth_skb_pad returns an error the skb was freed */
753 if (eth_skb_pad(skb))
760 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
761 * @rx_ring: rx descriptor ring to store buffers on
762 * @old_buff: donor buffer to have page reused
764 * Synchronizes page for reuse by the adapter
766 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
767 struct ixgbevf_rx_buffer *old_buff)
769 struct ixgbevf_rx_buffer *new_buff;
770 u16 nta = rx_ring->next_to_alloc;
772 new_buff = &rx_ring->rx_buffer_info[nta];
774 /* update, and store next to alloc */
776 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
778 /* transfer page from old buffer to new buffer */
779 new_buff->page = old_buff->page;
780 new_buff->dma = old_buff->dma;
781 new_buff->page_offset = old_buff->page_offset;
782 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
785 static inline bool ixgbevf_page_is_reserved(struct page *page)
787 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
790 static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
792 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
793 struct page *page = rx_buffer->page;
795 /* avoid re-using remote pages */
796 if (unlikely(ixgbevf_page_is_reserved(page)))
799 #if (PAGE_SIZE < 8192)
800 /* if we are only owner of page we can reuse it */
801 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
804 #define IXGBEVF_LAST_OFFSET \
805 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)
807 if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
812 /* If we have drained the page fragment pool we need to update
813 * the pagecnt_bias and page count so that we fully restock the
814 * number of references the driver holds.
816 if (unlikely(!pagecnt_bias)) {
817 page_ref_add(page, USHRT_MAX);
818 rx_buffer->pagecnt_bias = USHRT_MAX;
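	/* Reuse bookkeeping, in short: pagecnt_bias counts how many of the
	 * page's references still belong to the driver.  Whenever
	 * page_ref_count() minus the bias exceeds one, some other owner
	 * (typically the stack) still holds the buffer and the half-page
	 * cannot be flipped back onto the ring.  Refilling the bias in bulk
	 * with USHRT_MAX avoids touching the atomic refcount on every reuse.
	 */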
825 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
826 * @rx_ring: rx descriptor ring to transact packets on
827 * @rx_buffer: buffer containing page to add
828 * @skb: sk_buff to place the data into
829 * @size: size of buffer to be added
831 * This function will add the data contained in rx_buffer->page to the skb.
833 static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
834 struct ixgbevf_rx_buffer *rx_buffer,
838 #if (PAGE_SIZE < 8192)
839 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
841 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
842 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
843 SKB_DATA_ALIGN(size);
845 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
846 rx_buffer->page_offset, size, truesize);
847 #if (PAGE_SIZE < 8192)
848 rx_buffer->page_offset ^= truesize;
850 rx_buffer->page_offset += truesize;
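	/* With 4K pages the buffer is half a page, so XOR-ing the offset by
	 * truesize simply ping-pongs between the two halves; on larger page
	 * sizes the offset instead advances linearly until the page is
	 * exhausted (see ixgbevf_can_reuse_rx_page() above).
	 */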
855 struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
856 struct ixgbevf_rx_buffer *rx_buffer,
857 struct xdp_buff *xdp,
858 union ixgbe_adv_rx_desc *rx_desc)
860 unsigned int size = xdp->data_end - xdp->data;
861 #if (PAGE_SIZE < 8192)
862 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
864 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
865 xdp->data_hard_start);
867 unsigned int headlen;
870 /* prefetch first cache line of first page */
872 #if L1_CACHE_BYTES < 128
873 prefetch(xdp->data + L1_CACHE_BYTES);
875 /* Note, we get here by enabling legacy-rx via:
877 * ethtool --set-priv-flags <dev> legacy-rx on
879 * In this mode, we currently get 0 extra XDP headroom as
880 * opposed to having legacy-rx off, where we process XDP
881 * packets going to stack via ixgbevf_build_skb().
883 * For ixgbevf_construct_skb() mode it means that the
884 * xdp->data_meta will always point to xdp->data, since
	 * the helper cannot expand the head. Should this ever change
	 * in the future for legacy-rx mode, then let's also add
	 * xdp->data_meta handling here.
	 */
890 /* allocate a skb to store the frags */
891 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
895 /* Determine available headroom for copy */
897 if (headlen > IXGBEVF_RX_HDR_SIZE)
898 headlen = eth_get_headlen(skb->dev, xdp->data,
899 IXGBEVF_RX_HDR_SIZE);
901 /* align pull length to size of long to optimize memcpy performance */
902 memcpy(__skb_put(skb, headlen), xdp->data,
903 ALIGN(headlen, sizeof(long)));
905 /* update all of the pointers */
908 skb_add_rx_frag(skb, 0, rx_buffer->page,
909 (xdp->data + headlen) -
910 page_address(rx_buffer->page),
912 #if (PAGE_SIZE < 8192)
913 rx_buffer->page_offset ^= truesize;
915 rx_buffer->page_offset += truesize;
918 rx_buffer->pagecnt_bias++;
924 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
927 struct ixgbe_hw *hw = &adapter->hw;
929 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
932 static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
933 struct ixgbevf_rx_buffer *rx_buffer,
934 struct xdp_buff *xdp,
935 union ixgbe_adv_rx_desc *rx_desc)
937 unsigned int metasize = xdp->data - xdp->data_meta;
938 #if (PAGE_SIZE < 8192)
939 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
941 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
942 SKB_DATA_ALIGN(xdp->data_end -
943 xdp->data_hard_start);
947 /* Prefetch first cache line of first page. If xdp->data_meta
948 * is unused, this points to xdp->data, otherwise, we likely
949 * have a consumer accessing first few bytes of meta data,
950 * and then actual data.
952 prefetch(xdp->data_meta);
953 #if L1_CACHE_BYTES < 128
954 prefetch(xdp->data_meta + L1_CACHE_BYTES);
957 /* build an skb around the page buffer */
958 skb = build_skb(xdp->data_hard_start, truesize);
962 /* update pointers within the skb to store the data */
963 skb_reserve(skb, xdp->data - xdp->data_hard_start);
964 __skb_put(skb, xdp->data_end - xdp->data);
966 skb_metadata_set(skb, metasize);
968 /* update buffer offset */
969 #if (PAGE_SIZE < 8192)
970 rx_buffer->page_offset ^= truesize;
972 rx_buffer->page_offset += truesize;
978 #define IXGBEVF_XDP_PASS 0
979 #define IXGBEVF_XDP_CONSUMED 1
980 #define IXGBEVF_XDP_TX 2
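/* These results are funneled back to the Rx cleanup path as error pointers:
 * ixgbevf_run_xdp() returns ERR_PTR(-result) for anything other than
 * IXGBEVF_XDP_PASS, and ixgbevf_clean_rx_irq() checks PTR_ERR(skb) against
 * -IXGBEVF_XDP_TX to decide whether the buffer was consumed for transmit
 * (flip it for reuse) or simply dropped (give back the refcount bias).
 */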
982 static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
983 struct xdp_buff *xdp)
985 struct ixgbevf_tx_buffer *tx_buffer;
986 union ixgbe_adv_tx_desc *tx_desc;
991 len = xdp->data_end - xdp->data;
993 if (unlikely(!ixgbevf_desc_unused(ring)))
994 return IXGBEVF_XDP_CONSUMED;
996 dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
997 if (dma_mapping_error(ring->dev, dma))
998 return IXGBEVF_XDP_CONSUMED;
1000 /* record the location of the first descriptor for this packet */
1001 i = ring->next_to_use;
1002 tx_buffer = &ring->tx_buffer_info[i];
1004 dma_unmap_len_set(tx_buffer, len, len);
1005 dma_unmap_addr_set(tx_buffer, dma, dma);
1006 tx_buffer->data = xdp->data;
1007 tx_buffer->bytecount = len;
1008 tx_buffer->gso_segs = 1;
1009 tx_buffer->protocol = 0;
1011 /* Populate minimal context descriptor that will provide for the
1012 * fact that we are expected to process Ethernet frames.
1014 if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) {
1015 struct ixgbe_adv_tx_context_desc *context_desc;
1017 set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
1019 context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
1020 context_desc->vlan_macip_lens =
1021 cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT);
1022 context_desc->fceof_saidx = 0;
1023 context_desc->type_tucmd_mlhl =
1024 cpu_to_le32(IXGBE_TXD_CMD_DEXT |
1025 IXGBE_ADVTXD_DTYP_CTXT);
1026 context_desc->mss_l4len_idx = 0;
1031 /* put descriptor type bits */
1032 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
1033 IXGBE_ADVTXD_DCMD_DEXT |
1034 IXGBE_ADVTXD_DCMD_IFCS;
1035 cmd_type |= len | IXGBE_TXD_CMD;
1037 tx_desc = IXGBEVF_TX_DESC(ring, i);
1038 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1040 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1041 tx_desc->read.olinfo_status =
1042 cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
1045 /* Avoid any potential race with cleanup */
1048 /* set next_to_watch value indicating a packet is present */
1050 if (i == ring->count)
1053 tx_buffer->next_to_watch = tx_desc;
1054 ring->next_to_use = i;
1056 return IXGBEVF_XDP_TX;
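/* Note: the context descriptor above is programmed only once per XDP ring,
 * guarded by __IXGBEVF_TX_XDP_RING_PRIMED; ixgbevf_configure_tx_ring()
 * clears the bit again so the ring is re-primed after a reset.
 */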
1059 static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
1060 struct ixgbevf_ring *rx_ring,
1061 struct xdp_buff *xdp)
1063 int result = IXGBEVF_XDP_PASS;
1064 struct ixgbevf_ring *xdp_ring;
1065 struct bpf_prog *xdp_prog;
1069 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1074 act = bpf_prog_run_xdp(xdp_prog, xdp);
1079 xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
1080 result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
1083 bpf_warn_invalid_xdp_action(act);
1086 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
1087 /* fallthrough -- handle aborts by dropping packet */
1089 result = IXGBEVF_XDP_CONSUMED;
1094 return ERR_PTR(-result);
1097 static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
1098 struct ixgbevf_rx_buffer *rx_buffer,
1101 #if (PAGE_SIZE < 8192)
1102 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
1104 rx_buffer->page_offset ^= truesize;
1106 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
1107 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
1108 SKB_DATA_ALIGN(size);
1110 rx_buffer->page_offset += truesize;
1114 static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
1115 struct ixgbevf_ring *rx_ring,
1118 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1119 struct ixgbevf_adapter *adapter = q_vector->adapter;
1120 u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
1121 struct sk_buff *skb = rx_ring->skb;
1122 bool xdp_xmit = false;
1123 struct xdp_buff xdp;
1125 xdp.rxq = &rx_ring->xdp_rxq;
1127 while (likely(total_rx_packets < budget)) {
1128 struct ixgbevf_rx_buffer *rx_buffer;
1129 union ixgbe_adv_rx_desc *rx_desc;
1132 /* return some buffers to hardware, one at a time is too slow */
1133 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
1134 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
1138 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
1139 size = le16_to_cpu(rx_desc->wb.upper.length);
1143 /* This memory barrier is needed to keep us from reading
1144 * any other fields out of the rx_desc until we know the
1145 * RXD_STAT_DD bit is set
1149 rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);
1151 /* retrieve a buffer from the ring */
1153 xdp.data = page_address(rx_buffer->page) +
1154 rx_buffer->page_offset;
1155 xdp.data_meta = xdp.data;
1156 xdp.data_hard_start = xdp.data -
1157 ixgbevf_rx_offset(rx_ring);
1158 xdp.data_end = xdp.data + size;
1160 skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
1164 if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
1166 ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
1169 rx_buffer->pagecnt_bias++;
1172 total_rx_bytes += size;
1174 ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
1175 } else if (ring_uses_build_skb(rx_ring)) {
1176 skb = ixgbevf_build_skb(rx_ring, rx_buffer,
1179 skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
1183 /* exit if we failed to retrieve a buffer */
1185 rx_ring->rx_stats.alloc_rx_buff_failed++;
1186 rx_buffer->pagecnt_bias++;
1190 ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb);
1193 /* fetch next buffer in frame if non-eop */
1194 if (ixgbevf_is_non_eop(rx_ring, rx_desc))
1197 /* verify the packet layout is correct */
1198 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
1203 /* probably a little skewed due to removing CRC */
1204 total_rx_bytes += skb->len;
		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
1209 if ((skb->pkt_type == PACKET_BROADCAST ||
1210 skb->pkt_type == PACKET_MULTICAST) &&
1211 ether_addr_equal(rx_ring->netdev->dev_addr,
1212 eth_hdr(skb)->h_source)) {
1213 dev_kfree_skb_irq(skb);
1217 /* populate checksum, VLAN, and protocol */
1218 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
1220 ixgbevf_rx_skb(q_vector, skb);
1222 /* reset skb pointer */
1225 /* update budget accounting */
1229 /* place incomplete frames back on ring for completion */
1233 struct ixgbevf_ring *xdp_ring =
1234 adapter->xdp_ring[rx_ring->queue_index];
1236 /* Force memory writes to complete before letting h/w
1237 * know there are new descriptors to fetch.
1240 ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
1243 u64_stats_update_begin(&rx_ring->syncp);
1244 rx_ring->stats.packets += total_rx_packets;
1245 rx_ring->stats.bytes += total_rx_bytes;
1246 u64_stats_update_end(&rx_ring->syncp);
1247 q_vector->rx.total_packets += total_rx_packets;
1248 q_vector->rx.total_bytes += total_rx_bytes;
1250 return total_rx_packets;
 * ixgbevf_poll - NAPI polling callback
1255 * @napi: napi struct with our devices info in it
1256 * @budget: amount of work driver is allowed to do this pass, in packets
 * This function will clean one or more rings associated with a q_vector.
 */
1261 static int ixgbevf_poll(struct napi_struct *napi, int budget)
1263 struct ixgbevf_q_vector *q_vector =
1264 container_of(napi, struct ixgbevf_q_vector, napi);
1265 struct ixgbevf_adapter *adapter = q_vector->adapter;
1266 struct ixgbevf_ring *ring;
1267 int per_ring_budget, work_done = 0;
1268 bool clean_complete = true;
1270 ixgbevf_for_each_ring(ring, q_vector->tx) {
1271 if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
1272 clean_complete = false;
1278 /* attempt to distribute budget to each queue fairly, but don't allow
1279 * the budget to go below 1 because we'll exit polling
1281 if (q_vector->rx.count > 1)
1282 per_ring_budget = max(budget/q_vector->rx.count, 1);
1284 per_ring_budget = budget;
1286 ixgbevf_for_each_ring(ring, q_vector->rx) {
1287 int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
1289 work_done += cleaned;
1290 if (cleaned >= per_ring_budget)
1291 clean_complete = false;
1294 /* If all work not completed, return budget and keep polling */
1295 if (!clean_complete)
1298 /* Exit the polling mode, but don't re-enable interrupts if stack might
1299 * poll us due to busy-polling
1301 if (likely(napi_complete_done(napi, work_done))) {
1302 if (adapter->rx_itr_setting == 1)
1303 ixgbevf_set_itr(q_vector);
1304 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
1305 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
1306 ixgbevf_irq_enable_queues(adapter,
1307 BIT(q_vector->v_idx));
1310 return min(work_done, budget - 1);
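	/* Returning min(work_done, budget - 1) keeps the reported work below
	 * the full budget once napi_complete_done() has run, so the NAPI
	 * core does not keep this vector in polled mode unnecessarily.
	 */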
1314 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
1315 * @q_vector: structure containing interrupt and ring information
1317 void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
1319 struct ixgbevf_adapter *adapter = q_vector->adapter;
1320 struct ixgbe_hw *hw = &adapter->hw;
1321 int v_idx = q_vector->v_idx;
1322 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
1324 /* set the WDIS bit to not clear the timer bits and cause an
1325 * immediate assertion of the interrupt
1327 itr_reg |= IXGBE_EITR_CNT_WDIS;
1329 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
1333 * ixgbevf_configure_msix - Configure MSI-X hardware
1334 * @adapter: board private structure
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 */
1339 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
1341 struct ixgbevf_q_vector *q_vector;
1342 int q_vectors, v_idx;
1344 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1345 adapter->eims_enable_mask = 0;
1347 /* Populate the IVAR table and set the ITR values to the
1348 * corresponding register.
1350 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1351 struct ixgbevf_ring *ring;
1353 q_vector = adapter->q_vector[v_idx];
1355 ixgbevf_for_each_ring(ring, q_vector->rx)
1356 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1358 ixgbevf_for_each_ring(ring, q_vector->tx)
1359 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1361 if (q_vector->tx.ring && !q_vector->rx.ring) {
1362 /* Tx only vector */
1363 if (adapter->tx_itr_setting == 1)
1364 q_vector->itr = IXGBE_12K_ITR;
1366 q_vector->itr = adapter->tx_itr_setting;
1368 /* Rx or Rx/Tx vector */
1369 if (adapter->rx_itr_setting == 1)
1370 q_vector->itr = IXGBE_20K_ITR;
1372 q_vector->itr = adapter->rx_itr_setting;
1375 /* add q_vector eims value to global eims_enable_mask */
1376 adapter->eims_enable_mask |= BIT(v_idx);
1378 ixgbevf_write_eitr(q_vector);
1381 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
1382 /* setup eims_other and add value to global eims_enable_mask */
1383 adapter->eims_other = BIT(v_idx);
1384 adapter->eims_enable_mask |= adapter->eims_other;
1387 enum latency_range {
1391 latency_invalid = 255
1395 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
1396 * @q_vector: structure containing interrupt and ring information
1397 * @ring_container: structure containing ring performance data
1399 * Stores a new ITR value based on packets and byte
1400 * counts during the last interrupt. The advantage of per interrupt
1401 * computation is faster updates and more accurate ITR for the current
1402 * traffic pattern. Constants in this function were computed
1403 * based on theoretical maximum wire speed and thresholds were set based
1404 * on testing data as well as attempting to minimize response time
1405 * while increasing bulk throughput.
1407 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
1408 struct ixgbevf_ring_container *ring_container)
1410 int bytes = ring_container->total_bytes;
1411 int packets = ring_container->total_packets;
1414 u8 itr_setting = ring_container->itr;
1419 /* simple throttle rate management
1420 * 0-20MB/s lowest (100000 ints/s)
1421 * 20-100MB/s low (20000 ints/s)
1422 * 100-1249MB/s bulk (12000 ints/s)
1424 /* what was last interrupt timeslice? */
1425 timepassed_us = q_vector->itr >> 2;
1426 if (timepassed_us == 0)
1429 bytes_perint = bytes / timepassed_us; /* bytes/usec */
1431 switch (itr_setting) {
1432 case lowest_latency:
1433 if (bytes_perint > 10)
1434 itr_setting = low_latency;
1437 if (bytes_perint > 20)
1438 itr_setting = bulk_latency;
1439 else if (bytes_perint <= 10)
1440 itr_setting = lowest_latency;
1443 if (bytes_perint <= 20)
1444 itr_setting = low_latency;
1448 /* clear work counters since we have the values we need */
1449 ring_container->total_bytes = 0;
1450 ring_container->total_packets = 0;
1452 /* write updated itr to ring container */
1453 ring_container->itr = itr_setting;
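	/* Worked example (illustrative numbers): with the 12K ITR setting the
	 * timeslice is roughly 336 >> 2 = 84 usecs; 4000 bytes seen in that
	 * window gives 4000 / 84 ~= 47 bytes/usec, above the 20 bytes/usec
	 * threshold, so the ring is classified as bulk_latency.
	 * (1 byte/usec is roughly 1 MB/s, which is how the thresholds map to
	 * the MB/s ranges quoted in the comment above.)
	 */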
1456 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
1458 u32 new_itr = q_vector->itr;
1461 ixgbevf_update_itr(q_vector, &q_vector->tx);
1462 ixgbevf_update_itr(q_vector, &q_vector->rx);
1464 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
1466 switch (current_itr) {
1467 /* counts and packets in update_itr are dependent on these numbers */
1468 case lowest_latency:
1469 new_itr = IXGBE_100K_ITR;
1472 new_itr = IXGBE_20K_ITR;
1475 new_itr = IXGBE_12K_ITR;
1481 if (new_itr != q_vector->itr) {
1482 /* do an exponential smoothing */
1483 new_itr = (10 * new_itr * q_vector->itr) /
1484 ((9 * new_itr) + q_vector->itr);
1486 /* save the algorithm value here */
1487 q_vector->itr = new_itr;
1489 ixgbevf_write_eitr(q_vector);
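		/* Smoothing example (illustrative): moving from the 20K
		 * setting (200) toward the 100K setting (40) yields
		 * (10 * 40 * 200) / (9 * 40 + 200) = 80000 / 560 = 142 on
		 * the first pass, so the interrupt rate converges over
		 * several updates instead of jumping to the new value.
		 */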
1493 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
1495 struct ixgbevf_adapter *adapter = data;
1496 struct ixgbe_hw *hw = &adapter->hw;
1498 hw->mac.get_link_status = 1;
1500 ixgbevf_service_event_schedule(adapter);
1502 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
1508 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
1510 * @data: pointer to our q_vector struct for this interrupt vector
1512 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
1514 struct ixgbevf_q_vector *q_vector = data;
1516 /* EIAM disabled interrupts (on this vector) for us */
1517 if (q_vector->rx.ring || q_vector->tx.ring)
1518 napi_schedule_irqoff(&q_vector->napi);
1524 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1525 * @adapter: board private structure
1527 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1528 * interrupts from the kernel.
1530 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1532 struct net_device *netdev = adapter->netdev;
1533 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1534 unsigned int ri = 0, ti = 0;
1537 for (vector = 0; vector < q_vectors; vector++) {
1538 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1539 struct msix_entry *entry = &adapter->msix_entries[vector];
1541 if (q_vector->tx.ring && q_vector->rx.ring) {
1542 snprintf(q_vector->name, sizeof(q_vector->name),
1543 "%s-TxRx-%u", netdev->name, ri++);
1545 } else if (q_vector->rx.ring) {
1546 snprintf(q_vector->name, sizeof(q_vector->name),
1547 "%s-rx-%u", netdev->name, ri++);
1548 } else if (q_vector->tx.ring) {
1549 snprintf(q_vector->name, sizeof(q_vector->name),
1550 "%s-tx-%u", netdev->name, ti++);
1552 /* skip this unused q_vector */
1555 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1556 q_vector->name, q_vector);
1558 hw_dbg(&adapter->hw,
1559 "request_irq failed for MSIX interrupt Error: %d\n",
1561 goto free_queue_irqs;
1565 err = request_irq(adapter->msix_entries[vector].vector,
1566 &ixgbevf_msix_other, 0, netdev->name, adapter);
1568 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
1570 goto free_queue_irqs;
1578 free_irq(adapter->msix_entries[vector].vector,
1579 adapter->q_vector[vector]);
1581 /* This failure is non-recoverable - it indicates the system is
1582 * out of MSIX vector resources and the VF driver cannot run
1583 * without them. Set the number of msix vectors to zero
1584 * indicating that not enough can be allocated. The error
1585 * will be returned to the user indicating device open failed.
1586 * Any further attempts to force the driver to open will also
1587 * fail. The only way to recover is to unload the driver and
1588 * reload it again. If the system has recovered some MSIX
1589 * vectors then it may succeed.
1591 adapter->num_msix_vectors = 0;
1596 * ixgbevf_request_irq - initialize interrupts
1597 * @adapter: board private structure
1599 * Attempts to configure interrupts using the best available
1600 * capabilities of the hardware and kernel.
1602 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1604 int err = ixgbevf_request_msix_irqs(adapter);
1607 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
1612 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1616 if (!adapter->msix_entries)
1619 q_vectors = adapter->num_msix_vectors;
1622 free_irq(adapter->msix_entries[i].vector, adapter);
1625 for (; i >= 0; i--) {
1626 /* free only the irqs that were actually requested */
1627 if (!adapter->q_vector[i]->rx.ring &&
1628 !adapter->q_vector[i]->tx.ring)
1631 free_irq(adapter->msix_entries[i].vector,
1632 adapter->q_vector[i]);
1637 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1638 * @adapter: board private structure
1640 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1642 struct ixgbe_hw *hw = &adapter->hw;
1645 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1646 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1647 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1649 IXGBE_WRITE_FLUSH(hw);
1651 for (i = 0; i < adapter->num_msix_vectors; i++)
1652 synchronize_irq(adapter->msix_entries[i].vector);
1656 * ixgbevf_irq_enable - Enable default interrupt generation settings
1657 * @adapter: board private structure
1659 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1661 struct ixgbe_hw *hw = &adapter->hw;
1663 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1664 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1665 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1669 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1670 * @adapter: board private structure
1671 * @ring: structure containing ring specific data
1673 * Configure the Tx descriptor ring after a reset.
1675 static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1676 struct ixgbevf_ring *ring)
1678 struct ixgbe_hw *hw = &adapter->hw;
1679 u64 tdba = ring->dma;
1681 u32 txdctl = IXGBE_TXDCTL_ENABLE;
1682 u8 reg_idx = ring->reg_idx;
1684 /* disable queue to avoid issues while updating state */
1685 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1686 IXGBE_WRITE_FLUSH(hw);
1688 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1689 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1690 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1691 ring->count * sizeof(union ixgbe_adv_tx_desc));
1693 /* disable head writeback */
1694 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1695 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1697 /* enable relaxed ordering */
1698 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1699 (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1700 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1702 /* reset head and tail pointers */
1703 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1704 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1705 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
	/* reset ntu and ntc to place SW in sync with hardware */
1708 ring->next_to_clean = 0;
1709 ring->next_to_use = 0;
1711 /* In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on-chip descriptors, which is
	 * currently 40.
	 */
1715 txdctl |= (8 << 16); /* WTHRESH = 8 */
	/* Setting PTHRESH to 32 improves performance */
1718 txdctl |= (1u << 8) | /* HTHRESH = 1 */
1719 32; /* PTHRESH = 32 */
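	/* Field layout (82599-class VF TXDCTL, for reference): PTHRESH
	 * occupies the low bits, HTHRESH starts at bit 8 and WTHRESH at
	 * bit 16, which is why the values above are OR-ed in with shifts of
	 * 0, 8 and 16 respectively.
	 */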
1721 /* reinitialize tx_buffer_info */
1722 memset(ring->tx_buffer_info, 0,
1723 sizeof(struct ixgbevf_tx_buffer) * ring->count);
1725 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
1726 clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
1728 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1730 /* poll to verify queue is enabled */
1732 usleep_range(1000, 2000);
1733 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1734 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1736 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
1740 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1741 * @adapter: board private structure
1743 * Configure the Tx unit of the MAC after a reset.
1745 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1749 /* Setup the HW Tx Head and Tail descriptor pointers */
1750 for (i = 0; i < adapter->num_tx_queues; i++)
1751 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
1752 for (i = 0; i < adapter->num_xdp_queues; i++)
1753 ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]);
1756 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1758 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
1759 struct ixgbevf_ring *ring, int index)
1761 struct ixgbe_hw *hw = &adapter->hw;
1764 srrctl = IXGBE_SRRCTL_DROP_EN;
1766 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1767 if (ring_uses_large_buffer(ring))
1768 srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1770 srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1771 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1773 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1776 static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1778 struct ixgbe_hw *hw = &adapter->hw;
1780 /* PSRTYPE must be initialized in 82599 */
1781 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1782 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1783 IXGBE_PSRTYPE_L2HDR;
1785 if (adapter->num_rx_queues > 1)
1788 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1791 #define IXGBEVF_MAX_RX_DESC_POLL 10
1792 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1793 struct ixgbevf_ring *ring)
1795 struct ixgbe_hw *hw = &adapter->hw;
1796 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1798 u8 reg_idx = ring->reg_idx;
1800 if (IXGBE_REMOVED(hw->hw_addr))
1802 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1803 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1805 /* write value back with RXDCTL.ENABLE bit cleared */
1806 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1808 /* the hardware may take up to 100us to really disable the Rx queue */
1811 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1812 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
1819 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1820 struct ixgbevf_ring *ring)
1822 struct ixgbe_hw *hw = &adapter->hw;
1823 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1825 u8 reg_idx = ring->reg_idx;
1827 if (IXGBE_REMOVED(hw->hw_addr))
1830 usleep_range(1000, 2000);
1831 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1832 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
		       reg_idx);
1840 * ixgbevf_init_rss_key - Initialize adapter RSS key
1841 * @adapter: device handle
1843 * Allocates and initializes the RSS key if it is not allocated.
1845 static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter)
1849 if (!adapter->rss_key) {
1850 rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL);
1851 if (unlikely(!rss_key))
1854 netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE);
1855 adapter->rss_key = rss_key;
1861 static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1863 struct ixgbe_hw *hw = &adapter->hw;
1864 u32 vfmrqc = 0, vfreta = 0;
1865 u16 rss_i = adapter->num_rx_queues;
1868 /* Fill out hash function seeds */
1869 for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
1870 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i));
1872 for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
1876 adapter->rss_indir_tbl[i] = j;
1878 vfreta |= j << (i & 0x3) * 8;
1880 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
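		/* Each 32-bit VFRETA register packs four one-byte redirection
		 * entries, so entry i is shifted into byte lane (i & 0x3) and
		 * the accumulated word is written to VFRETA(i >> 2) once
		 * every fourth entry.
		 */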
1885 /* Perform hash on these packet types */
1886 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1887 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1888 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1889 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1891 vfmrqc |= IXGBE_VFMRQC_RSSEN;
1893 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1896 static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1897 struct ixgbevf_ring *ring)
1899 struct ixgbe_hw *hw = &adapter->hw;
1900 union ixgbe_adv_rx_desc *rx_desc;
1901 u64 rdba = ring->dma;
1903 u8 reg_idx = ring->reg_idx;
1905 /* disable queue to avoid issues while updating state */
1906 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1907 ixgbevf_disable_rx_queue(adapter, ring);
1909 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1910 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1911 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1912 ring->count * sizeof(union ixgbe_adv_rx_desc));
1914 #ifndef CONFIG_SPARC
1915 /* enable relaxed ordering */
1916 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1917 IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1919 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1920 IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1921 IXGBE_DCA_RXCTRL_DATA_WRO_EN);
1924 /* reset head and tail pointers */
1925 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1926 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1927 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1929 /* initialize rx_buffer_info */
1930 memset(ring->rx_buffer_info, 0,
1931 sizeof(struct ixgbevf_rx_buffer) * ring->count);
1933 /* initialize Rx descriptor 0 */
1934 rx_desc = IXGBEVF_RX_DESC(ring, 0);
1935 rx_desc->wb.upper.length = 0;
	/* reset ntu and ntc to place SW in sync with hardware */
1938 ring->next_to_clean = 0;
1939 ring->next_to_use = 0;
1940 ring->next_to_alloc = 0;
1942 ixgbevf_configure_srrctl(adapter, ring, reg_idx);
1944 /* RXDCTL.RLPML does not work on 82599 */
1945 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
1946 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
1947 IXGBE_RXDCTL_RLPML_EN);
1949 #if (PAGE_SIZE < 8192)
1950 /* Limit the maximum frame size so we don't overrun the skb */
1951 if (ring_uses_build_skb(ring) &&
1952 !ring_uses_large_buffer(ring))
1953 rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
1954 IXGBE_RXDCTL_RLPML_EN;
1958 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1959 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1961 ixgbevf_rx_desc_queue_enable(adapter, ring);
1962 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1965 static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
1966 struct ixgbevf_ring *rx_ring)
1968 struct net_device *netdev = adapter->netdev;
1969 unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1971 /* set build_skb and buffer size flags */
1972 clear_ring_build_skb_enabled(rx_ring);
1973 clear_ring_uses_large_buffer(rx_ring);
1975 if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
1978 set_ring_build_skb_enabled(rx_ring);
1980 if (PAGE_SIZE < 8192) {
1981 if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
1984 set_ring_uses_large_buffer(rx_ring);
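	/* Buffer sizing summary (assuming 4K pages): build_skb mode normally
	 * splits each page into two 2K buffers; only when the MTU-derived
	 * max_frame no longer fits alongside the required headroom does the
	 * ring switch to the larger 3K buffers carved from higher-order
	 * pages (see ring_uses_large_buffer() and the SRRCTL setup above).
	 */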
1989 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1990 * @adapter: board private structure
1992 * Configure the Rx unit of the MAC after a reset.
1994 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1996 struct ixgbe_hw *hw = &adapter->hw;
1997 struct net_device *netdev = adapter->netdev;
2000 ixgbevf_setup_psrtype(adapter);
2001 if (hw->mac.type >= ixgbe_mac_X550_vf)
2002 ixgbevf_setup_vfmrqc(adapter);
2004 spin_lock_bh(&adapter->mbx_lock);
2005 /* notify the PF of our intent to use this size of frame */
2006 ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
2007 spin_unlock_bh(&adapter->mbx_lock);
2009 dev_err(&adapter->pdev->dev,
2010 "Failed to set MTU at %d\n", netdev->mtu);
2012 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2013 * the Base and Length of the Rx Descriptor Ring
2015 for (i = 0; i < adapter->num_rx_queues; i++) {
2016 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
2018 ixgbevf_set_rx_buffer_len(adapter, rx_ring);
2019 ixgbevf_configure_rx_ring(adapter, rx_ring);
2023 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
2024 __be16 proto, u16 vid)
2026 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2027 struct ixgbe_hw *hw = &adapter->hw;
2030 spin_lock_bh(&adapter->mbx_lock);
2032 /* add VID to filter table */
2033 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
2035 spin_unlock_bh(&adapter->mbx_lock);
2037 /* translate error return types so error makes sense */
2038 if (err == IXGBE_ERR_MBX)
2041 if (err == IXGBE_ERR_INVALID_ARGUMENT)
2044 set_bit(vid, adapter->active_vlans);
2049 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
2050 __be16 proto, u16 vid)
2052 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2053 struct ixgbe_hw *hw = &adapter->hw;
2056 spin_lock_bh(&adapter->mbx_lock);
2058 /* remove VID from filter table */
2059 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
2061 spin_unlock_bh(&adapter->mbx_lock);
2063 clear_bit(vid, adapter->active_vlans);
2068 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
2072 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2073 ixgbevf_vlan_rx_add_vid(adapter->netdev,
2074 htons(ETH_P_8021Q), vid);
2077 static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
2079 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2080 struct ixgbe_hw *hw = &adapter->hw;
2083 if ((netdev_uc_count(netdev)) > 10) {
2084 pr_err("Too many unicast filters - No Space\n");
2088 if (!netdev_uc_empty(netdev)) {
2089 struct netdev_hw_addr *ha;
2091 netdev_for_each_uc_addr(ha, netdev) {
2092 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
2096 /* If the list is empty then send message to PF driver to
2097 * clear all MAC VLANs on this VF.
2099 hw->mac.ops.set_uc_addr(hw, 0, NULL);
2106 * ixgbevf_set_rx_mode - Multicast and unicast set
2107 * @netdev: network interface device structure
 * The set_rx_mode entry point is called whenever the multicast address
2110 * list, unicast address list or the network interface flags are updated.
2111 * This routine is responsible for configuring the hardware for proper
2112 * multicast mode and configuring requested unicast filters.
2114 static void ixgbevf_set_rx_mode(struct net_device *netdev)
2116 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2117 struct ixgbe_hw *hw = &adapter->hw;
2118 unsigned int flags = netdev->flags;
2121 /* request the most inclusive mode we need */
2122 if (flags & IFF_PROMISC)
2123 xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
2124 else if (flags & IFF_ALLMULTI)
2125 xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
2126 else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
2127 xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
2129 xcast_mode = IXGBEVF_XCAST_MODE_NONE;
2131 spin_lock_bh(&adapter->mbx_lock);
2133 hw->mac.ops.update_xcast_mode(hw, xcast_mode);
2135 /* reprogram multicast list */
2136 hw->mac.ops.update_mc_addr_list(hw, netdev);
2138 ixgbevf_write_uc_addr_list(netdev);
2140 spin_unlock_bh(&adapter->mbx_lock);
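/* Sketch of the flag-to-mode selection used above (illustrative only): the VF
 * requests the most inclusive xcast mode implied by netdev->flags, and the PF
 * may still downgrade the request (promiscuous mode, for instance, typically
 * requires the VF to be trusted).  The helper name is hypothetical:
 *
 *	static int example_pick_xcast_mode(unsigned int flags)
 *	{
 *		if (flags & IFF_PROMISC)
 *			return IXGBEVF_XCAST_MODE_PROMISC;
 *		if (flags & IFF_ALLMULTI)
 *			return IXGBEVF_XCAST_MODE_ALLMULTI;
 *		if (flags & (IFF_BROADCAST | IFF_MULTICAST))
 *			return IXGBEVF_XCAST_MODE_MULTI;
 *		return IXGBEVF_XCAST_MODE_NONE;
 *	}
 */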
2143 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
2146 struct ixgbevf_q_vector *q_vector;
2147 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2149 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2150 q_vector = adapter->q_vector[q_idx];
2151 napi_enable(&q_vector->napi);
2155 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
2158 struct ixgbevf_q_vector *q_vector;
2159 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2161 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2162 q_vector = adapter->q_vector[q_idx];
2163 napi_disable(&q_vector->napi);
2167 static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
2169 struct ixgbe_hw *hw = &adapter->hw;
2170 unsigned int def_q = 0;
2171 unsigned int num_tcs = 0;
2172 unsigned int num_rx_queues = adapter->num_rx_queues;
2173 unsigned int num_tx_queues = adapter->num_tx_queues;
2176 spin_lock_bh(&adapter->mbx_lock);
2178 /* fetch queue configuration from the PF */
2179 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2181 spin_unlock_bh(&adapter->mbx_lock);
2187 /* we need only one Tx queue */
2190 /* update default Tx ring register index */
2191 adapter->tx_ring[0]->reg_idx = def_q;
2193 /* we need as many queues as traffic classes */
2194 num_rx_queues = num_tcs;
2197 /* if we have a bad config abort request queue reset */
2198 if ((adapter->num_rx_queues != num_rx_queues) ||
2199 (adapter->num_tx_queues != num_tx_queues)) {
2200 /* force mailbox timeout to prevent further messages */
2201 hw->mbx.timeout = 0;
2203 /* wait for watchdog to come around and bail us out */
2204 set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
2210 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
2212 ixgbevf_configure_dcb(adapter);
2214 ixgbevf_set_rx_mode(adapter->netdev);
2216 ixgbevf_restore_vlan(adapter);
2217 ixgbevf_ipsec_restore(adapter);
2219 ixgbevf_configure_tx(adapter);
2220 ixgbevf_configure_rx(adapter);
2223 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
2225 /* Only save pre-reset stats if there are some */
2226 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
2227 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
2228 adapter->stats.base_vfgprc;
2229 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
2230 adapter->stats.base_vfgptc;
2231 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
2232 adapter->stats.base_vfgorc;
2233 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
2234 adapter->stats.base_vfgotc;
2235 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
2236 adapter->stats.base_vfmprc;
2240 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2242 struct ixgbe_hw *hw = &adapter->hw;
2244 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2245 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2246 adapter->stats.last_vfgorc |=
2247 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2248 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2249 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2250 adapter->stats.last_vfgotc |=
2251 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2252 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2254 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2255 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2256 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2257 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2258 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2261 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
2263 struct ixgbe_hw *hw = &adapter->hw;
2264 int api[] = { ixgbe_mbox_api_14,
2269 ixgbe_mbox_api_unknown };
2272 spin_lock_bh(&adapter->mbx_lock);
2274 while (api[idx] != ixgbe_mbox_api_unknown) {
2275 err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
2281 spin_unlock_bh(&adapter->mbx_lock);
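/* Rough sketch of the negotiation loop whose body is abbreviated above,
 * assuming the usual pattern of walking api[] from newest to oldest and
 * stopping at the first version the PF accepts (illustrative only):
 *
 *	while (api[idx] != ixgbe_mbox_api_unknown) {
 *		err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
 *		if (!err)
 *			break;		// PF accepted this mailbox API
 *		idx++;			// fall back to an older version
 *	}
 */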
2284 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
2286 struct net_device *netdev = adapter->netdev;
2287 struct ixgbe_hw *hw = &adapter->hw;
2289 ixgbevf_configure_msix(adapter);
2291 spin_lock_bh(&adapter->mbx_lock);
2293 if (is_valid_ether_addr(hw->mac.addr))
2294 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2296 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
2298 spin_unlock_bh(&adapter->mbx_lock);
2300 smp_mb__before_atomic();
2301 clear_bit(__IXGBEVF_DOWN, &adapter->state);
2302 ixgbevf_napi_enable_all(adapter);
2304 /* clear any pending interrupts, may auto mask */
2305 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2306 ixgbevf_irq_enable(adapter);
2308 /* enable transmits */
2309 netif_tx_start_all_queues(netdev);
2311 ixgbevf_save_reset_stats(adapter);
2312 ixgbevf_init_last_counter_stats(adapter);
2314 hw->mac.get_link_status = 1;
2315 mod_timer(&adapter->service_timer, jiffies);
2318 void ixgbevf_up(struct ixgbevf_adapter *adapter)
2320 ixgbevf_configure(adapter);
2322 ixgbevf_up_complete(adapter);
2326 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
2327 * @rx_ring: ring to free buffers from
2329 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
2331 u16 i = rx_ring->next_to_clean;
2333 /* Free Rx ring sk_buff */
2335 dev_kfree_skb(rx_ring->skb);
2336 rx_ring->skb = NULL;
2339 /* Free all the Rx ring pages */
2340 while (i != rx_ring->next_to_alloc) {
2341 struct ixgbevf_rx_buffer *rx_buffer;
2343 rx_buffer = &rx_ring->rx_buffer_info[i];
2345 /* Invalidate cache lines that may have been written to by
2346 * device so that we avoid corrupting memory.
2348 dma_sync_single_range_for_cpu(rx_ring->dev,
2350 rx_buffer->page_offset,
2351 ixgbevf_rx_bufsz(rx_ring),
2354 /* free resources associated with mapping */
2355 dma_unmap_page_attrs(rx_ring->dev,
2357 ixgbevf_rx_pg_size(rx_ring),
2359 IXGBEVF_RX_DMA_ATTR);
2361 __page_frag_cache_drain(rx_buffer->page,
2362 rx_buffer->pagecnt_bias);
2365 if (i == rx_ring->count)
2369 rx_ring->next_to_alloc = 0;
2370 rx_ring->next_to_clean = 0;
2371 rx_ring->next_to_use = 0;
2375 * ixgbevf_clean_tx_ring - Free Tx Buffers
2376 * @tx_ring: ring to be cleaned
2378 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
2380 u16 i = tx_ring->next_to_clean;
2381 struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
2383 while (i != tx_ring->next_to_use) {
2384 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
2386 /* Free all the Tx ring sk_buffs */
2387 if (ring_is_xdp(tx_ring))
2388 page_frag_free(tx_buffer->data);
2390 dev_kfree_skb_any(tx_buffer->skb);
2392 /* unmap skb header data */
2393 dma_unmap_single(tx_ring->dev,
2394 dma_unmap_addr(tx_buffer, dma),
2395 dma_unmap_len(tx_buffer, len),
2398 /* check for eop_desc to determine the end of the packet */
2399 eop_desc = tx_buffer->next_to_watch;
2400 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
2402 /* unmap remaining buffers */
2403 while (tx_desc != eop_desc) {
2407 if (unlikely(i == tx_ring->count)) {
2409 tx_buffer = tx_ring->tx_buffer_info;
2410 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
2413 /* unmap any remaining paged data */
2414 if (dma_unmap_len(tx_buffer, len))
2415 dma_unmap_page(tx_ring->dev,
2416 dma_unmap_addr(tx_buffer, dma),
2417 dma_unmap_len(tx_buffer, len),
2421 /* move us one more past the eop_desc for start of next pkt */
2424 if (unlikely(i == tx_ring->count)) {
2426 tx_buffer = tx_ring->tx_buffer_info;
2430 /* reset next_to_use and next_to_clean */
2431 tx_ring->next_to_use = 0;
2432 tx_ring->next_to_clean = 0;
2437 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2438 * @adapter: board private structure
2440 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2444 for (i = 0; i < adapter->num_rx_queues; i++)
2445 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
2449 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2450 * @adapter: board private structure
2452 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2456 for (i = 0; i < adapter->num_tx_queues; i++)
2457 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
2458 for (i = 0; i < adapter->num_xdp_queues; i++)
2459 ixgbevf_clean_tx_ring(adapter->xdp_ring[i]);
2462 void ixgbevf_down(struct ixgbevf_adapter *adapter)
2464 struct net_device *netdev = adapter->netdev;
2465 struct ixgbe_hw *hw = &adapter->hw;
2468 /* signal that we are down to the interrupt handler */
2469 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2470 return; /* do nothing if already down */
2472 /* disable all enabled Rx queues */
2473 for (i = 0; i < adapter->num_rx_queues; i++)
2474 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2476 usleep_range(10000, 20000);
2478 netif_tx_stop_all_queues(netdev);
2480 /* call carrier off first to avoid false dev_watchdog timeouts */
2481 netif_carrier_off(netdev);
2482 netif_tx_disable(netdev);
2484 ixgbevf_irq_disable(adapter);
2486 ixgbevf_napi_disable_all(adapter);
2488 del_timer_sync(&adapter->service_timer);
2490 /* disable transmits in the hardware now that interrupts are off */
2491 for (i = 0; i < adapter->num_tx_queues; i++) {
2492 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2494 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2495 IXGBE_TXDCTL_SWFLSH);
2498 for (i = 0; i < adapter->num_xdp_queues; i++) {
2499 u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
2501 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2502 IXGBE_TXDCTL_SWFLSH);
2505 if (!pci_channel_offline(adapter->pdev))
2506 ixgbevf_reset(adapter);
2508 ixgbevf_clean_all_tx_rings(adapter);
2509 ixgbevf_clean_all_rx_rings(adapter);
2512 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2514 WARN_ON(in_interrupt());
2516 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2519 ixgbevf_down(adapter);
2520 ixgbevf_up(adapter);
2522 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2525 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2527 struct ixgbe_hw *hw = &adapter->hw;
2528 struct net_device *netdev = adapter->netdev;
2530 if (hw->mac.ops.reset_hw(hw)) {
2531 hw_dbg(hw, "PF still resetting\n");
2533 hw->mac.ops.init_hw(hw);
2534 ixgbevf_negotiate_api(adapter);
2537 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2538 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
2539 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2542 adapter->last_reset = jiffies;
2545 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2548 int vector_threshold;
2550 /* We'll want at least 2 (vector_threshold):
2551 * 1) TxQ[0] + RxQ[0] handler
2552 * 2) Other (Link Status Change, etc.)
2554 vector_threshold = MIN_MSIX_COUNT;
2556 /* The more we get, the more we will assign to Tx/Rx Cleanup
2557 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2558 * Right now, we simply care about how many we'll get; we'll
2559 * set them up later while requesting IRQs.
2561 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2562 vector_threshold, vectors);
2565 dev_err(&adapter->pdev->dev,
2566 "Unable to allocate MSI-X interrupts\n");
2567 kfree(adapter->msix_entries);
2568 adapter->msix_entries = NULL;
2572 /* Adjust for only the vectors we'll use, which is minimum
2573 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2574 * vectors we were allocated.
2576 adapter->num_msix_vectors = vectors;
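/* pci_enable_msix_range() returns the number of vectors actually allocated
 * when at least the requested minimum is available, or a negative errno
 * otherwise.  A minimal usage sketch under that assumption (variable names
 * are illustrative):
 *
 *	int got = pci_enable_msix_range(pdev, entries, MIN_MSIX_COUNT, wanted);
 *
 *	if (got < 0)
 *		return got;	// not even the minimum could be allocated
 *	// 'got' may be anywhere in [MIN_MSIX_COUNT, wanted]
 */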
2582 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
2583 * @adapter: board private structure to initialize
2585 * This is the top level queue allocation routine. The order here is very
2586 * important, starting with the "most" number of features turned on at once,
2587 * and ending with the smallest set of features. This way large combinations
2588 * can be allocated if they're turned on, and smaller combinations are the
2589 * fallthrough conditions.
2592 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2594 struct ixgbe_hw *hw = &adapter->hw;
2595 unsigned int def_q = 0;
2596 unsigned int num_tcs = 0;
2599 /* Start with base case */
2600 adapter->num_rx_queues = 1;
2601 adapter->num_tx_queues = 1;
2602 adapter->num_xdp_queues = 0;
2604 spin_lock_bh(&adapter->mbx_lock);
2606 /* fetch queue configuration from the PF */
2607 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2609 spin_unlock_bh(&adapter->mbx_lock);
2614 /* we need as many queues as traffic classes */
2616 adapter->num_rx_queues = num_tcs;
2618 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2620 switch (hw->api_version) {
2621 case ixgbe_mbox_api_11:
2622 case ixgbe_mbox_api_12:
2623 case ixgbe_mbox_api_13:
2624 case ixgbe_mbox_api_14:
2625 if (adapter->xdp_prog &&
2626 hw->mac.max_tx_queues == rss)
2627 rss = rss > 3 ? 2 : 1;
2629 adapter->num_rx_queues = rss;
2630 adapter->num_tx_queues = rss;
2631 adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
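/* Sketch of the queue sizing above, for illustration: the RSS queue count is
 * the smaller of the online CPU count and IXGBEVF_MAX_RSS_QUEUES, and when an
 * XDP program is loaded each queue pair also needs its own XDP Tx ring, so
 * the count is reduced if the hardware cannot supply extra Tx queues.  The
 * locals xdp_loaded and max_tx_queues stand in for the fields used above:
 *
 *	u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
 *
 *	if (xdp_loaded && max_tx_queues == rss)
 *		rss = rss > 3 ? 2 : 1;	// leave room for the XDP Tx rings
 */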
2639 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2640 * @adapter: board private structure to initialize
2642 * Attempt to configure the interrupts using the best available
2643 * capabilities of the hardware and the kernel.
2645 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2647 int vector, v_budget;
2649 /* It's easy to be greedy for MSI-X vectors, but it really
2650 * doesn't do us much good if we have a lot more vectors
2651 * than CPUs. So let's be conservative and only ask for
2652 * (roughly) the same number of vectors as there are CPUs.
2653 * The default is to use pairs of vectors.
2655 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2656 v_budget = min_t(int, v_budget, num_online_cpus());
2657 v_budget += NON_Q_VECTORS;
2659 adapter->msix_entries = kcalloc(v_budget,
2660 sizeof(struct msix_entry), GFP_KERNEL);
2661 if (!adapter->msix_entries)
2664 for (vector = 0; vector < v_budget; vector++)
2665 adapter->msix_entries[vector].entry = vector;
2667 /* A failure in MSI-X entry allocation isn't fatal, but the VF driver
2668 * does not support any other modes, so we will simply fail here. Note
2669 * that we clean up the msix_entries pointer elsewhere.
2671 return ixgbevf_acquire_msix_vectors(adapter, v_budget);
2674 static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
2675 struct ixgbevf_ring_container *head)
2677 ring->next = head->ring;
2683 * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
2684 * @adapter: board private structure to initialize
2685 * @v_idx: index of vector in adapter struct
2686 * @txr_count: number of Tx rings for q vector
2687 * @txr_idx: index of first Tx ring to assign
2688 * @xdp_count: total number of XDP rings to allocate
2689 * @xdp_idx: index of first XDP ring to allocate
2690 * @rxr_count: number of Rx rings for q vector
2691 * @rxr_idx: index of first Rx ring to assign
2693 * We allocate one q_vector. If allocation fails we return -ENOMEM.
2695 static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
2696 int txr_count, int txr_idx,
2697 int xdp_count, int xdp_idx,
2698 int rxr_count, int rxr_idx)
2700 struct ixgbevf_q_vector *q_vector;
2701 int reg_idx = txr_idx + xdp_idx;
2702 struct ixgbevf_ring *ring;
2703 int ring_count, size;
2705 ring_count = txr_count + xdp_count + rxr_count;
2706 size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);
2708 /* allocate q_vector and rings */
2709 q_vector = kzalloc(size, GFP_KERNEL);
2713 /* initialize NAPI */
2714 netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
2716 /* tie q_vector and adapter together */
2717 adapter->q_vector[v_idx] = q_vector;
2718 q_vector->adapter = adapter;
2719 q_vector->v_idx = v_idx;
2721 /* initialize pointer to rings */
2722 ring = q_vector->ring;
2725 /* assign generic ring traits */
2726 ring->dev = &adapter->pdev->dev;
2727 ring->netdev = adapter->netdev;
2729 /* configure backlink on ring */
2730 ring->q_vector = q_vector;
2732 /* update q_vector Tx values */
2733 ixgbevf_add_ring(ring, &q_vector->tx);
2735 /* apply Tx specific ring traits */
2736 ring->count = adapter->tx_ring_count;
2737 ring->queue_index = txr_idx;
2738 ring->reg_idx = reg_idx;
2740 /* assign ring to adapter */
2741 adapter->tx_ring[txr_idx] = ring;
2743 /* update count and index */
2748 /* push pointer to next ring */
2753 /* assign generic ring traits */
2754 ring->dev = &adapter->pdev->dev;
2755 ring->netdev = adapter->netdev;
2757 /* configure backlink on ring */
2758 ring->q_vector = q_vector;
2760 /* update q_vector Tx values */
2761 ixgbevf_add_ring(ring, &q_vector->tx);
2763 /* apply Tx specific ring traits */
2764 ring->count = adapter->tx_ring_count;
2765 ring->queue_index = xdp_idx;
2766 ring->reg_idx = reg_idx;
2769 /* assign ring to adapter */
2770 adapter->xdp_ring[xdp_idx] = ring;
2772 /* update count and index */
2777 /* push pointer to next ring */
2782 /* assign generic ring traits */
2783 ring->dev = &adapter->pdev->dev;
2784 ring->netdev = adapter->netdev;
2786 /* configure backlink on ring */
2787 ring->q_vector = q_vector;
2789 /* update q_vector Rx values */
2790 ixgbevf_add_ring(ring, &q_vector->rx);
2792 /* apply Rx specific ring traits */
2793 ring->count = adapter->rx_ring_count;
2794 ring->queue_index = rxr_idx;
2795 ring->reg_idx = rxr_idx;
2797 /* assign ring to adapter */
2798 adapter->rx_ring[rxr_idx] = ring;
2800 /* update count and index */
2804 /* push pointer to next ring */
2812 * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
2813 * @adapter: board private structure to initialize
2814 * @v_idx: index of vector in adapter struct
2816 * This function frees the memory allocated to the q_vector. In addition if
2817 * NAPI is enabled it will delete any references to the NAPI struct prior
2818 * to freeing the q_vector.
2820 static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)
2822 struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];
2823 struct ixgbevf_ring *ring;
2825 ixgbevf_for_each_ring(ring, q_vector->tx) {
2826 if (ring_is_xdp(ring))
2827 adapter->xdp_ring[ring->queue_index] = NULL;
2829 adapter->tx_ring[ring->queue_index] = NULL;
2832 ixgbevf_for_each_ring(ring, q_vector->rx)
2833 adapter->rx_ring[ring->queue_index] = NULL;
2835 adapter->q_vector[v_idx] = NULL;
2836 netif_napi_del(&q_vector->napi);
2838 /* ixgbevf_get_stats() might access the rings on this vector,
2839 * we must wait a grace period before freeing it.
2841 kfree_rcu(q_vector, rcu);
2845 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2846 * @adapter: board private structure to initialize
2848 * We allocate one q_vector per queue interrupt. If allocation fails we
2851 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2853 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2854 int rxr_remaining = adapter->num_rx_queues;
2855 int txr_remaining = adapter->num_tx_queues;
2856 int xdp_remaining = adapter->num_xdp_queues;
2857 int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
2860 if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
2861 for (; rxr_remaining; v_idx++, q_vectors--) {
2862 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
2864 err = ixgbevf_alloc_q_vector(adapter, v_idx,
2865 0, 0, 0, 0, rqpv, rxr_idx);
2869 /* update counts and index */
2870 rxr_remaining -= rqpv;
2875 for (; q_vectors; v_idx++, q_vectors--) {
2876 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
2877 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
2878 int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors);
2880 err = ixgbevf_alloc_q_vector(adapter, v_idx,
2888 /* update counts and index */
2889 rxr_remaining -= rqpv;
2891 txr_remaining -= tqpv;
2893 xdp_remaining -= xqpv;
2902 ixgbevf_free_q_vector(adapter, v_idx);
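/* The loops above spread the remaining rings evenly over the remaining
 * vectors with DIV_ROUND_UP; four Rx rings over three vectors, for example,
 * come out as 2 + 1 + 1.  A standalone sketch of that arithmetic
 * (illustrative only):
 *
 *	while (q_vectors) {
 *		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
 *
 *		rxr_remaining -= rqpv;
 *		q_vectors--;
 *	}
 */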
2909 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2910 * @adapter: board private structure to initialize
2912 * This function frees the memory allocated to the q_vectors. In addition if
2913 * NAPI is enabled it will delete any references to the NAPI struct prior
2914 * to freeing the q_vector.
2916 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2918 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2922 ixgbevf_free_q_vector(adapter, q_vectors);
2927 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2928 * @adapter: board private structure
2931 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2933 if (!adapter->msix_entries)
2936 pci_disable_msix(adapter->pdev);
2937 kfree(adapter->msix_entries);
2938 adapter->msix_entries = NULL;
2942 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2943 * @adapter: board private structure to initialize
2946 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2950 /* Number of supported queues */
2951 ixgbevf_set_num_queues(adapter);
2953 err = ixgbevf_set_interrupt_capability(adapter);
2955 hw_dbg(&adapter->hw,
2956 "Unable to setup interrupt capabilities\n");
2957 goto err_set_interrupt;
2960 err = ixgbevf_alloc_q_vectors(adapter);
2962 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
2963 goto err_alloc_q_vectors;
2966 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n",
2967 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
2968 adapter->num_rx_queues, adapter->num_tx_queues,
2969 adapter->num_xdp_queues);
2971 set_bit(__IXGBEVF_DOWN, &adapter->state);
2974 err_alloc_q_vectors:
2975 ixgbevf_reset_interrupt_capability(adapter);
2981 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2982 * @adapter: board private structure to clear interrupt scheme on
2984 * We go through and clear interrupt specific resources and reset the structure
2985 * to pre-load conditions
2987 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2989 adapter->num_tx_queues = 0;
2990 adapter->num_xdp_queues = 0;
2991 adapter->num_rx_queues = 0;
2993 ixgbevf_free_q_vectors(adapter);
2994 ixgbevf_reset_interrupt_capability(adapter);
2998 * ixgbevf_sw_init - Initialize general software structures
2999 * @adapter: board private structure to initialize
3001 * ixgbevf_sw_init initializes the Adapter private data structure.
3002 * Fields are initialized based on PCI device information and
3003 * OS network device settings (MTU size).
3005 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
3007 struct ixgbe_hw *hw = &adapter->hw;
3008 struct pci_dev *pdev = adapter->pdev;
3009 struct net_device *netdev = adapter->netdev;
3012 /* PCI config space info */
3013 hw->vendor_id = pdev->vendor;
3014 hw->device_id = pdev->device;
3015 hw->revision_id = pdev->revision;
3016 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3017 hw->subsystem_device_id = pdev->subsystem_device;
3019 hw->mbx.ops.init_params(hw);
3021 if (hw->mac.type >= ixgbe_mac_X550_vf) {
3022 err = ixgbevf_init_rss_key(adapter);
3027 /* assume legacy case in which PF would only give VF 2 queues */
3028 hw->mac.max_tx_queues = 2;
3029 hw->mac.max_rx_queues = 2;
3031 /* lock to protect mailbox accesses */
3032 spin_lock_init(&adapter->mbx_lock);
3034 err = hw->mac.ops.reset_hw(hw);
3036 dev_info(&pdev->dev,
3037 "PF still in reset state. Is the PF interface up?\n");
3039 err = hw->mac.ops.init_hw(hw);
3041 pr_err("init_shared_code failed: %d\n", err);
3044 ixgbevf_negotiate_api(adapter);
3045 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
3047 dev_info(&pdev->dev, "Error reading MAC address\n");
3048 else if (is_zero_ether_addr(adapter->hw.mac.addr))
3049 dev_info(&pdev->dev,
3050 "MAC address not assigned by administrator.\n");
3051 ether_addr_copy(netdev->dev_addr, hw->mac.addr);
3054 if (!is_valid_ether_addr(netdev->dev_addr)) {
3055 dev_info(&pdev->dev, "Assigning random MAC address\n");
3056 eth_hw_addr_random(netdev);
3057 ether_addr_copy(hw->mac.addr, netdev->dev_addr);
3058 ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr);
3061 /* Enable dynamic interrupt throttling rates */
3062 adapter->rx_itr_setting = 1;
3063 adapter->tx_itr_setting = 1;
3065 /* set default ring sizes */
3066 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
3067 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
3069 set_bit(__IXGBEVF_DOWN, &adapter->state);
3076 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
3078 u32 current_counter = IXGBE_READ_REG(hw, reg); \
3079 if (current_counter < last_counter) \
3080 counter += 0x100000000LL; \
3081 last_counter = current_counter; \
3082 counter &= 0xFFFFFFFF00000000LL; \
3083 counter |= current_counter; \
3086 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
3088 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
3089 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
3090 u64 current_counter = (current_counter_msb << 32) | \
3091 current_counter_lsb; \
3092 if (current_counter < last_counter) \
3093 counter += 0x1000000000LL; \
3094 last_counter = current_counter; \
3095 counter &= 0xFFFFFFF000000000LL; \
3096 counter |= current_counter; \
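/* The VF statistics registers are only 32 bits wide (36 bits for the octet
 * counters, split across LSB/MSB registers) and wrap around, so the macros
 * above extend them to 64 bits by adding the register span whenever the new
 * raw value is smaller than the previous one.  Equivalent sketch of the
 * 32-bit case, with 'last' and 'total' standing in for the macro arguments:
 *
 *	u32 cur = IXGBE_READ_REG(hw, reg);
 *
 *	if (cur < last)				// the 32-bit register wrapped
 *		total += 0x100000000ULL;
 *	last = cur;
 *	total = (total & 0xFFFFFFFF00000000ULL) | cur;
 */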
3099 * ixgbevf_update_stats - Update the board statistics counters.
3100 * @adapter: board private structure
3102 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
3104 struct ixgbe_hw *hw = &adapter->hw;
3105 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
3106 u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
3109 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3110 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3113 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
3114 adapter->stats.vfgprc);
3115 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
3116 adapter->stats.vfgptc);
3117 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3118 adapter->stats.last_vfgorc,
3119 adapter->stats.vfgorc);
3120 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3121 adapter->stats.last_vfgotc,
3122 adapter->stats.vfgotc);
3123 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
3124 adapter->stats.vfmprc);
3126 for (i = 0; i < adapter->num_rx_queues; i++) {
3127 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
3129 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
3130 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
3131 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
3132 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
3135 adapter->hw_csum_rx_error = hw_csum_rx_error;
3136 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
3137 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
3138 adapter->alloc_rx_page = alloc_rx_page;
3142 * ixgbevf_service_timer - Timer Call-back
3143 * @t: pointer to timer_list struct
3145 static void ixgbevf_service_timer(struct timer_list *t)
3147 struct ixgbevf_adapter *adapter = from_timer(adapter, t,
3150 /* Reset the timer */
3151 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
3153 ixgbevf_service_event_schedule(adapter);
3156 static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
3158 if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
3162 /* If we're already down or resetting, just bail */
3163 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3164 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
3165 test_bit(__IXGBEVF_RESETTING, &adapter->state)) {
3170 adapter->tx_timeout_count++;
3172 ixgbevf_reinit_locked(adapter);
3177 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
3178 * @adapter: pointer to the device adapter structure
3180 * This function serves two purposes. First it strobes the interrupt lines
3181 * in order to make certain interrupts are occurring. Second, it sets the
3182 * bits needed to check for TX hangs. As a result we should immediately
3183 * determine if a hang has occurred.
3185 static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
3187 struct ixgbe_hw *hw = &adapter->hw;
3191 /* If we're down or resetting, just bail */
3192 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3193 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3196 /* Force detection of hung controller */
3197 if (netif_carrier_ok(adapter->netdev)) {
3198 for (i = 0; i < adapter->num_tx_queues; i++)
3199 set_check_for_tx_hang(adapter->tx_ring[i]);
3200 for (i = 0; i < adapter->num_xdp_queues; i++)
3201 set_check_for_tx_hang(adapter->xdp_ring[i]);
3204 /* get one bit for every active Tx/Rx interrupt vector */
3205 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
3206 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
3208 if (qv->rx.ring || qv->tx.ring)
3212 /* Cause software interrupt to ensure rings are cleaned */
3213 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
3217 * ixgbevf_watchdog_update_link - update the link status
3218 * @adapter: pointer to the device adapter structure
3220 static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
3222 struct ixgbe_hw *hw = &adapter->hw;
3223 u32 link_speed = adapter->link_speed;
3224 bool link_up = adapter->link_up;
3227 spin_lock_bh(&adapter->mbx_lock);
3229 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
3231 spin_unlock_bh(&adapter->mbx_lock);
3233 /* if check for link returns error we will need to reset */
3234 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
3235 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
3239 adapter->link_up = link_up;
3240 adapter->link_speed = link_speed;
3244 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
3245 * print link up message
3246 * @adapter: pointer to the device adapter structure
3248 static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
3250 struct net_device *netdev = adapter->netdev;
3252 /* only continue if link was previously down */
3253 if (netif_carrier_ok(netdev))
3256 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
3257 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
3259 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
3261 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
3265 netif_carrier_on(netdev);
3269 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
3270 * print link down message
3271 * @adapter: pointer to the adapter structure
3273 static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
3275 struct net_device *netdev = adapter->netdev;
3277 adapter->link_speed = 0;
3279 /* only continue if link was up previously */
3280 if (!netif_carrier_ok(netdev))
3283 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
3285 netif_carrier_off(netdev);
3289 * ixgbevf_watchdog_subtask - worker thread to bring link up
3290 * @adapter: board private structure
3292 static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
3294 /* if interface is down do nothing */
3295 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3296 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3299 ixgbevf_watchdog_update_link(adapter);
3301 if (adapter->link_up)
3302 ixgbevf_watchdog_link_is_up(adapter);
3304 ixgbevf_watchdog_link_is_down(adapter);
3306 ixgbevf_update_stats(adapter);
3310 * ixgbevf_service_task - manages and runs subtasks
3311 * @work: pointer to work_struct containing our data
3313 static void ixgbevf_service_task(struct work_struct *work)
3315 struct ixgbevf_adapter *adapter = container_of(work,
3316 struct ixgbevf_adapter,
3318 struct ixgbe_hw *hw = &adapter->hw;
3320 if (IXGBE_REMOVED(hw->hw_addr)) {
3321 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
3323 ixgbevf_down(adapter);
3329 ixgbevf_queue_reset_subtask(adapter);
3330 ixgbevf_reset_subtask(adapter);
3331 ixgbevf_watchdog_subtask(adapter);
3332 ixgbevf_check_hang_subtask(adapter);
3334 ixgbevf_service_event_complete(adapter);
3338 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
3339 * @tx_ring: Tx descriptor ring for a specific queue
3341 * Free all transmit software resources
3343 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
3345 ixgbevf_clean_tx_ring(tx_ring);
3347 vfree(tx_ring->tx_buffer_info);
3348 tx_ring->tx_buffer_info = NULL;
3350 /* if not set, then don't free */
3354 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
3357 tx_ring->desc = NULL;
3361 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
3362 * @adapter: board private structure
3364 * Free all transmit software resources
3366 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
3370 for (i = 0; i < adapter->num_tx_queues; i++)
3371 if (adapter->tx_ring[i]->desc)
3372 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
3373 for (i = 0; i < adapter->num_xdp_queues; i++)
3374 if (adapter->xdp_ring[i]->desc)
3375 ixgbevf_free_tx_resources(adapter->xdp_ring[i]);
3379 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
3380 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
3382 * Return 0 on success, negative on failure
3384 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
3386 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
3389 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
3390 tx_ring->tx_buffer_info = vmalloc(size);
3391 if (!tx_ring->tx_buffer_info)
3394 u64_stats_init(&tx_ring->syncp);
3396 /* round up to nearest 4K */
3397 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
3398 tx_ring->size = ALIGN(tx_ring->size, 4096);
3400 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
3401 &tx_ring->dma, GFP_KERNEL);
3408 vfree(tx_ring->tx_buffer_info);
3409 tx_ring->tx_buffer_info = NULL;
3410 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
3415 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
3416 * @adapter: board private structure
3418 * If this function returns with an error, then it's possible one or
3419 * more of the rings is populated (while the rest are not). It is the
3420 * caller's duty to clean those orphaned rings.
3422 * Return 0 on success, negative on failure
3424 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3426 int i, j = 0, err = 0;
3428 for (i = 0; i < adapter->num_tx_queues; i++) {
3429 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
3432 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
3436 for (j = 0; j < adapter->num_xdp_queues; j++) {
3437 err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]);
3440 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
3446 /* rewind the index freeing the rings as we go */
3448 ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
3450 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
3456 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
3457 * @adapter: board private structure
3458 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
3460 * Returns 0 on success, negative on failure
3462 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
3463 struct ixgbevf_ring *rx_ring)
3467 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
3468 rx_ring->rx_buffer_info = vmalloc(size);
3469 if (!rx_ring->rx_buffer_info)
3472 u64_stats_init(&rx_ring->syncp);
3474 /* Round up to nearest 4K */
3475 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3476 rx_ring->size = ALIGN(rx_ring->size, 4096);
3478 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
3479 &rx_ring->dma, GFP_KERNEL);
3484 /* XDP RX-queue info */
3485 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
3486 rx_ring->queue_index) < 0)
3489 rx_ring->xdp_prog = adapter->xdp_prog;
3493 vfree(rx_ring->rx_buffer_info);
3494 rx_ring->rx_buffer_info = NULL;
3495 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
3500 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
3501 * @adapter: board private structure
3503 * If this function returns with an error, then it's possible one or
3504 * more of the rings is populated (while the rest are not). It is the
3505 * caller's duty to clean those orphaned rings.
3507 * Return 0 on success, negative on failure
3509 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
3513 for (i = 0; i < adapter->num_rx_queues; i++) {
3514 err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
3517 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
3523 /* rewind the index freeing the rings as we go */
3525 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3530 * ixgbevf_free_rx_resources - Free Rx Resources
3531 * @rx_ring: ring to clean the resources from
3533 * Free all receive software resources
3535 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
3537 ixgbevf_clean_rx_ring(rx_ring);
3539 rx_ring->xdp_prog = NULL;
3540 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
3541 vfree(rx_ring->rx_buffer_info);
3542 rx_ring->rx_buffer_info = NULL;
3544 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
3547 rx_ring->desc = NULL;
3551 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
3552 * @adapter: board private structure
3554 * Free all receive software resources
3556 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3560 for (i = 0; i < adapter->num_rx_queues; i++)
3561 if (adapter->rx_ring[i]->desc)
3562 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3566 * ixgbevf_open - Called when a network interface is made active
3567 * @netdev: network interface device structure
3569 * Returns 0 on success, negative value on failure
3571 * The open entry point is called when a network interface is made
3572 * active by the system (IFF_UP). At this point all resources needed
3573 * for transmit and receive operations are allocated, the interrupt
3574 * handler is registered with the OS, the watchdog timer is started,
3575 * and the stack is notified that the interface is ready.
3577 int ixgbevf_open(struct net_device *netdev)
3579 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3580 struct ixgbe_hw *hw = &adapter->hw;
3583 /* A previous failure to open the device because of a lack of
3584 * available MSIX vector resources may have reset the number
3585 * of msix vectors variable to zero. The only way to recover
3586 * is to unload/reload the driver and hope that the system has
3587 * been able to recover some MSIX vector resources.
3589 if (!adapter->num_msix_vectors)
3592 if (hw->adapter_stopped) {
3593 ixgbevf_reset(adapter);
3594 /* if adapter is still stopped then PF isn't up and
3595 * the VF can't start.
3597 if (hw->adapter_stopped) {
3598 err = IXGBE_ERR_MBX;
3599 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
3600 goto err_setup_reset;
3604 /* disallow open during test */
3605 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3608 netif_carrier_off(netdev);
3610 /* allocate transmit descriptors */
3611 err = ixgbevf_setup_all_tx_resources(adapter);
3615 /* allocate receive descriptors */
3616 err = ixgbevf_setup_all_rx_resources(adapter);
3620 ixgbevf_configure(adapter);
3622 err = ixgbevf_request_irq(adapter);
3626 /* Notify the stack of the actual queue counts. */
3627 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
3629 goto err_set_queues;
3631 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
3633 goto err_set_queues;
3635 ixgbevf_up_complete(adapter);
3640 ixgbevf_free_irq(adapter);
3642 ixgbevf_free_all_rx_resources(adapter);
3644 ixgbevf_free_all_tx_resources(adapter);
3646 ixgbevf_reset(adapter);
3653 * ixgbevf_close_suspend - actions necessary to both suspend and close flows
3654 * @adapter: the private adapter struct
3656 * This function should contain the necessary work common to both suspending
3657 * and closing of the device.
3659 static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter)
3661 ixgbevf_down(adapter);
3662 ixgbevf_free_irq(adapter);
3663 ixgbevf_free_all_tx_resources(adapter);
3664 ixgbevf_free_all_rx_resources(adapter);
3668 * ixgbevf_close - Disables a network interface
3669 * @netdev: network interface device structure
3671 * Returns 0, this is not allowed to fail
3673 * The close entry point is called when an interface is de-activated
3674 * by the OS. The hardware is still under the drivers control, but
3675 * needs to be disabled. A global MAC reset is issued to stop the
3676 * hardware, and all transmit and receive resources are freed.
3678 int ixgbevf_close(struct net_device *netdev)
3680 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3682 if (netif_device_present(netdev))
3683 ixgbevf_close_suspend(adapter);
3688 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3690 struct net_device *dev = adapter->netdev;
3692 if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
3696 /* if interface is down do nothing */
3697 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3698 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3701 /* Hardware has to reinitialize queues and interrupts to
3702 * match packet buffer alignment. Unfortunately, the
3703 * hardware is not flexible enough to do this dynamically.
3707 if (netif_running(dev))
3710 ixgbevf_clear_interrupt_scheme(adapter);
3711 ixgbevf_init_interrupt_scheme(adapter);
3713 if (netif_running(dev))
3719 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3720 u32 vlan_macip_lens, u32 fceof_saidx,
3721 u32 type_tucmd, u32 mss_l4len_idx)
3723 struct ixgbe_adv_tx_context_desc *context_desc;
3724 u16 i = tx_ring->next_to_use;
3726 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
3729 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3731 /* set bits to identify this as an advanced context descriptor */
3732 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3734 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3735 context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
3736 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3737 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3740 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3741 struct ixgbevf_tx_buffer *first,
3743 struct ixgbevf_ipsec_tx_data *itd)
3745 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
3746 struct sk_buff *skb = first->skb;
3756 u32 paylen, l4_offset;
3757 u32 fceof_saidx = 0;
3760 if (skb->ip_summed != CHECKSUM_PARTIAL)
3763 if (!skb_is_gso(skb))
3766 err = skb_cow_head(skb, 0);
3770 if (eth_p_mpls(first->protocol))
3771 ip.hdr = skb_inner_network_header(skb);
3773 ip.hdr = skb_network_header(skb);
3774 l4.hdr = skb_checksum_start(skb);
3776 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3777 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3779 /* initialize outer IP header fields */
3780 if (ip.v4->version == 4) {
3781 unsigned char *csum_start = skb_checksum_start(skb);
3782 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
3783 int len = csum_start - trans_start;
3785 /* IP header will have to cancel out any data that
3786 * is not a part of the outer IP header, so set to
3787 * a reverse csum if needed, else init check to 0.
3789 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
3790 csum_fold(csum_partial(trans_start,
3792 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3795 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3796 IXGBE_TX_FLAGS_CSUM |
3797 IXGBE_TX_FLAGS_IPV4;
3799 ip.v6->payload_len = 0;
3800 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3801 IXGBE_TX_FLAGS_CSUM;
3804 /* determine offset of inner transport header */
3805 l4_offset = l4.hdr - skb->data;
3807 /* compute length of segmentation header */
3808 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
3810 /* remove payload length from inner checksum */
3811 paylen = skb->len - l4_offset;
3812 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
3814 /* update gso size and bytecount with header size */
3815 first->gso_segs = skb_shinfo(skb)->gso_segs;
3816 first->bytecount += (first->gso_segs - 1) * *hdr_len;
3818 /* mss_l4len_id: use 1 as index for TSO */
3819 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
3820 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3821 mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
3823 fceof_saidx |= itd->pfsa;
3824 type_tucmd |= itd->flags | itd->trailer_len;
3826 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
3827 vlan_macip_lens = l4.hdr - ip.hdr;
3828 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
3829 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3831 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
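/* Sketch of the TSO header math performed above (illustrative only): the
 * segmentation header length is the inner L4 offset plus the TCP header
 * length, and the context descriptor packs that together with the MSS and
 * the context index used for TSO:
 *
 *	u32 l4_offset = l4.hdr - skb->data;
 *	u32 hdr_len   = l4_offset + (l4.tcp->doff * 4);
 *	u32 mss_l4len = ((hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT) |
 *			(skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT) |
 *			(1u << IXGBE_ADVTXD_IDX_SHIFT);
 */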
3837 static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
3839 unsigned int offset = 0;
3841 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
3843 return offset == skb_checksum_start_offset(skb);
3846 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3847 struct ixgbevf_tx_buffer *first,
3848 struct ixgbevf_ipsec_tx_data *itd)
3850 struct sk_buff *skb = first->skb;
3851 u32 vlan_macip_lens = 0;
3852 u32 fceof_saidx = 0;
3855 if (skb->ip_summed != CHECKSUM_PARTIAL)
3858 switch (skb->csum_offset) {
3859 case offsetof(struct tcphdr, check):
3860 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3862 case offsetof(struct udphdr, check):
3864 case offsetof(struct sctphdr, checksum):
3865 /* validate that this is actually an SCTP request */
3866 if (((first->protocol == htons(ETH_P_IP)) &&
3867 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
3868 ((first->protocol == htons(ETH_P_IPV6)) &&
3869 ixgbevf_ipv6_csum_is_sctp(skb))) {
3870 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3875 skb_checksum_help(skb);
3879 if (first->protocol == htons(ETH_P_IP))
3880 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3882 /* update TX checksum flag */
3883 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3884 vlan_macip_lens = skb_checksum_start_offset(skb) -
3885 skb_network_offset(skb);
3887 /* vlan_macip_lens: MACLEN, VLAN tag */
3888 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3889 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3891 fceof_saidx |= itd->pfsa;
3892 type_tucmd |= itd->flags | itd->trailer_len;
3894 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3895 fceof_saidx, type_tucmd, 0);
3898 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
3900 /* set type for advanced descriptor with frame checksum insertion */
3901 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
3902 IXGBE_ADVTXD_DCMD_IFCS |
3903 IXGBE_ADVTXD_DCMD_DEXT);
3905 /* set HW VLAN bit if VLAN is present */
3906 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3907 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
3909 /* set segmentation enable bits for TSO/FSO */
3910 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3911 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
3916 static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
3917 u32 tx_flags, unsigned int paylen)
3919 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
3921 /* enable L4 checksum for TSO and TX checksum offload */
3922 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3923 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
3925 /* enable IPv4 checksum for TSO */
3926 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3927 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
3930 if (tx_flags & IXGBE_TX_FLAGS_IPSEC)
3931 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC);
3933 /* use index 1 context for TSO/FSO/FCOE/IPSEC */
3934 if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC))
3935 olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
3937 /* Check Context must be set if Tx switch is enabled, which it
3938 * always is in the case where virtual functions are running
3940 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
3942 tx_desc->read.olinfo_status = olinfo_status;
3945 static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3946 struct ixgbevf_tx_buffer *first,
3949 struct sk_buff *skb = first->skb;
3950 struct ixgbevf_tx_buffer *tx_buffer;
3951 union ixgbe_adv_tx_desc *tx_desc;
3952 struct skb_frag_struct *frag;
3954 unsigned int data_len, size;
3955 u32 tx_flags = first->tx_flags;
3956 __le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
3957 u16 i = tx_ring->next_to_use;
3959 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3961 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
3963 size = skb_headlen(skb);
3964 data_len = skb->data_len;
3966 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3970 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3971 if (dma_mapping_error(tx_ring->dev, dma))
3974 /* record length, and DMA address */
3975 dma_unmap_len_set(tx_buffer, len, size);
3976 dma_unmap_addr_set(tx_buffer, dma, dma);
3978 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3980 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3981 tx_desc->read.cmd_type_len =
3982 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
3986 if (i == tx_ring->count) {
3987 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3990 tx_desc->read.olinfo_status = 0;
3992 dma += IXGBE_MAX_DATA_PER_TXD;
3993 size -= IXGBE_MAX_DATA_PER_TXD;
3995 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3998 if (likely(!data_len))
4001 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
4005 if (i == tx_ring->count) {
4006 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
4009 tx_desc->read.olinfo_status = 0;
4011 size = skb_frag_size(frag);
4014 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
4017 tx_buffer = &tx_ring->tx_buffer_info[i];
4020 /* write last descriptor with RS and EOP bits */
4021 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
4022 tx_desc->read.cmd_type_len = cmd_type;
4024 /* set the timestamp */
4025 first->time_stamp = jiffies;
4027 skb_tx_timestamp(skb);
4029 /* Force memory writes to complete before letting h/w know there
4030 * are new descriptors to fetch. (Only applicable for weak-ordered
4031 * memory model archs, such as IA-64).
4033 * We also need this memory barrier (wmb) to make certain all of the
4034 * status bits have been updated before next_to_watch is written.
4038 /* set next_to_watch value indicating a packet is present */
4039 first->next_to_watch = tx_desc;
4042 if (i == tx_ring->count)
4045 tx_ring->next_to_use = i;
4047 /* notify HW of packet */
4048 ixgbevf_write_tail(tx_ring, i);
4052 dev_err(tx_ring->dev, "TX DMA map failed\n");
4053 tx_buffer = &tx_ring->tx_buffer_info[i];
4055 /* clear dma mappings for failed tx_buffer_info map */
4056 while (tx_buffer != first) {
4057 if (dma_unmap_len(tx_buffer, len))
4058 dma_unmap_page(tx_ring->dev,
4059 dma_unmap_addr(tx_buffer, dma),
4060 dma_unmap_len(tx_buffer, len),
4062 dma_unmap_len_set(tx_buffer, len, 0);
4065 i += tx_ring->count;
4066 tx_buffer = &tx_ring->tx_buffer_info[i];
4069 if (dma_unmap_len(tx_buffer, len))
4070 dma_unmap_single(tx_ring->dev,
4071 dma_unmap_addr(tx_buffer, dma),
4072 dma_unmap_len(tx_buffer, len),
4074 dma_unmap_len_set(tx_buffer, len, 0);
4076 dev_kfree_skb_any(tx_buffer->skb);
4077 tx_buffer->skb = NULL;
4079 tx_ring->next_to_use = i;
4082 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4084 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
4085 /* Herbert's original patch had:
4086 * smp_mb__after_netif_stop_queue();
4087 * but since that doesn't exist yet, just open code it.
4091 /* We need to check again in a case another CPU has just
4092 * made room available.
4094 if (likely(ixgbevf_desc_unused(tx_ring) < size))
4097 /* A reprieve! - use start_queue because it doesn't call schedule */
4098 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
4099 ++tx_ring->tx_stats.restart_queue;
4104 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4106 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
4108 return __ixgbevf_maybe_stop_tx(tx_ring, size);
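/* The stop/restart sequence above avoids a lost wakeup: the queue is stopped
 * first, a memory barrier (abbreviated in the excerpt above) publishes the
 * stopped state to the Tx cleanup path, and only then is the free-descriptor
 * count re-checked so a concurrent cleanup cannot slip in unnoticed.  Sketch
 * of that slow path (illustrative only):
 *
 *	netif_stop_subqueue(netdev, queue);
 *	smp_mb();			// order the stop vs. the recheck
 *	if (ixgbevf_desc_unused(tx_ring) < size)
 *		return -EBUSY;		// still full, stay stopped
 *	netif_start_subqueue(netdev, queue);
 *	return 0;
 */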
4111 static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
4112 struct ixgbevf_ring *tx_ring)
4114 struct ixgbevf_tx_buffer *first;
4117 u16 count = TXD_USE_COUNT(skb_headlen(skb));
4118 struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 };
4119 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4123 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
4125 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
4126 dev_kfree_skb_any(skb);
4127 return NETDEV_TX_OK;
4130 /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
4131 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
4132 * + 2 desc gap to keep tail from touching head,
4133 * + 1 desc for context descriptor,
4134 * otherwise try next time
4136 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4137 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
4138 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
4140 count += skb_shinfo(skb)->nr_frags;
4142 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
4143 tx_ring->tx_stats.tx_busy++;
4144 return NETDEV_TX_BUSY;
4147 /* record the location of the first descriptor for this packet */
4148 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4150 first->bytecount = skb->len;
4151 first->gso_segs = 1;
4153 if (skb_vlan_tag_present(skb)) {
4154 tx_flags |= skb_vlan_tag_get(skb);
4155 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
4156 tx_flags |= IXGBE_TX_FLAGS_VLAN;
4159 /* record initial flags and protocol */
4160 first->tx_flags = tx_flags;
4161 first->protocol = vlan_get_protocol(skb);
4163 #ifdef CONFIG_IXGBEVF_IPSEC
4164 if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
4167 tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
4171 ixgbevf_tx_csum(tx_ring, first, &ipsec_tx);
4173 ixgbevf_tx_map(tx_ring, first, hdr_len);
4175 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
4177 return NETDEV_TX_OK;
4180 dev_kfree_skb_any(first->skb);
4183 return NETDEV_TX_OK;
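/* Descriptor budgeting sketch for the count computed above (illustrative
 * only): the head data, and each page fragment when PAGE_SIZE exceeds
 * IXGBE_MAX_DATA_PER_TXD, may need several data descriptors, plus one context
 * descriptor and a two-descriptor gap so the tail never catches up with the
 * head:
 *
 *	count = TXD_USE_COUNT(skb_headlen(skb));
 *	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 *		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));
 *	needed = count + 3;		// +1 context descriptor, +2 gap
 */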
4186 static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4188 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4189 struct ixgbevf_ring *tx_ring;
4191 if (skb->len <= 0) {
4192 dev_kfree_skb_any(skb);
4193 return NETDEV_TX_OK;
4196 /* The minimum packet size for olinfo paylen is 17 so pad the skb
4197 * in order to meet this minimum size requirement.
4199 if (skb->len < 17) {
4200 if (skb_padto(skb, 17))
4201 return NETDEV_TX_OK;
4205 tx_ring = adapter->tx_ring[skb->queue_mapping];
4206 return ixgbevf_xmit_frame_ring(skb, tx_ring);
4210 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
4211 * @netdev: network interface device structure
4212 * @p: pointer to an address structure
4214 * Returns 0 on success, negative on failure
4216 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
4218 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4219 struct ixgbe_hw *hw = &adapter->hw;
4220 struct sockaddr *addr = p;
4223 if (!is_valid_ether_addr(addr->sa_data))
4224 return -EADDRNOTAVAIL;
4226 spin_lock_bh(&adapter->mbx_lock);
4228 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
4230 spin_unlock_bh(&adapter->mbx_lock);
4235 ether_addr_copy(hw->mac.addr, addr->sa_data);
4236 ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
4237 ether_addr_copy(netdev->dev_addr, addr->sa_data);
4243 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
4244 * @netdev: network interface device structure
4245 * @new_mtu: new value for maximum frame size
4247 * Returns 0 on success, negative on failure
4249 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
4251 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4252 struct ixgbe_hw *hw = &adapter->hw;
4253 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4256 /* prevent MTU being changed to a size unsupported by XDP */
4257 if (adapter->xdp_prog) {
4258 dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n");
4262 spin_lock_bh(&adapter->mbx_lock);
4263 /* notify the PF of our intent to use this size of frame */
4264 ret = hw->mac.ops.set_rlpml(hw, max_frame);
4265 spin_unlock_bh(&adapter->mbx_lock);
4269 hw_dbg(hw, "changing MTU from %d to %d\n",
4270 netdev->mtu, new_mtu);
4272 /* must set new MTU before calling down or up */
4273 netdev->mtu = new_mtu;
4275 if (netif_running(netdev))
4276 ixgbevf_reinit_locked(adapter);
4281 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
4282 {
4283 struct net_device *netdev = pci_get_drvdata(pdev);
4284 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4285 #ifdef CONFIG_PM
4286 int retval = 0;
4287 #endif
4289 rtnl_lock();
4290 netif_device_detach(netdev);
4292 if (netif_running(netdev))
4293 ixgbevf_close_suspend(adapter);
4295 ixgbevf_clear_interrupt_scheme(adapter);
4296 rtnl_unlock();
4298 #ifdef CONFIG_PM
4299 retval = pci_save_state(pdev);
4300 if (retval)
4301 return retval;
4303 #endif
4304 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4305 pci_disable_device(pdev);
4307 return 0;
4308 }
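/* Editor's note: the __IXGBEVF_DISABLED bit acts as a guard so that
 * pci_disable_device() is only called once even if suspend, shutdown and
 * the error handlers race; the same test_and_set_bit() pattern appears in
 * ixgbevf_remove() and ixgbevf_io_error_detected() below.
 */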
4310 #ifdef CONFIG_PM
4311 static int ixgbevf_resume(struct pci_dev *pdev)
4312 {
4313 struct net_device *netdev = pci_get_drvdata(pdev);
4314 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4315 u32 err;
4317 pci_restore_state(pdev);
4318 /* pci_restore_state clears dev->state_saved so call
4319 * pci_save_state to restore it.
4320 */
4321 pci_save_state(pdev);
4323 err = pci_enable_device_mem(pdev);
4324 if (err) {
4325 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
4326 return err;
4327 }
4329 adapter->hw.hw_addr = adapter->io_addr;
4330 smp_mb__before_atomic();
4331 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4332 pci_set_master(pdev);
4334 ixgbevf_reset(adapter);
4336 rtnl_lock();
4337 err = ixgbevf_init_interrupt_scheme(adapter);
4338 if (!err && netif_running(netdev))
4339 err = ixgbevf_open(netdev);
4340 rtnl_unlock();
4341 if (err)
4342 return err;
4344 netif_device_attach(netdev);
4346 return err;
4347 }
4349 #endif /* CONFIG_PM */
4350 static void ixgbevf_shutdown(struct pci_dev *pdev)
4351 {
4352 ixgbevf_suspend(pdev, PMSG_SUSPEND);
4353 }
4355 static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
4356 const struct ixgbevf_ring *ring)
4357 {
4358 u64 bytes, packets;
4359 unsigned int start;
4361 if (ring) {
4362 do {
4363 start = u64_stats_fetch_begin_irq(&ring->syncp);
4364 bytes = ring->stats.bytes;
4365 packets = ring->stats.packets;
4366 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
4367 stats->tx_bytes += bytes;
4368 stats->tx_packets += packets;
4369 }
4370 }
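/* Editor's note: the u64_stats_fetch_begin_irq()/_retry_irq() loop reads
 * the 64-bit byte/packet counters under a seqcount and retries if the
 * ring's writer updated them mid-read; on 64-bit kernels the sync object is
 * a no-op, so the loop reduces to a plain read there.
 */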
4372 static void ixgbevf_get_stats(struct net_device *netdev,
4373 struct rtnl_link_stats64 *stats)
4374 {
4375 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4376 unsigned int start;
4377 u64 bytes, packets;
4378 const struct ixgbevf_ring *ring;
4379 int i;
4381 ixgbevf_update_stats(adapter);
4383 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
4385 rcu_read_lock();
4386 for (i = 0; i < adapter->num_rx_queues; i++) {
4387 ring = adapter->rx_ring[i];
4388 do {
4389 start = u64_stats_fetch_begin_irq(&ring->syncp);
4390 bytes = ring->stats.bytes;
4391 packets = ring->stats.packets;
4392 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
4393 stats->rx_bytes += bytes;
4394 stats->rx_packets += packets;
4395 }
4397 for (i = 0; i < adapter->num_tx_queues; i++) {
4398 ring = adapter->tx_ring[i];
4399 ixgbevf_get_tx_ring_stats(stats, ring);
4400 }
4402 for (i = 0; i < adapter->num_xdp_queues; i++) {
4403 ring = adapter->xdp_ring[i];
4404 ixgbevf_get_tx_ring_stats(stats, ring);
4405 }
4406 rcu_read_unlock();
4407 }
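/* Editor's note: ndo_get_stats64 can run without the rtnl lock, so the
 * rcu_read_lock() above is presumably what keeps the ring pointers stable
 * while a concurrent reconfiguration replaces them; the per-ring totals are
 * simply summed into the rtnl_link_stats64 handed in by the core.
 */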
4409 #define IXGBEVF_MAX_MAC_HDR_LEN 127
4410 #define IXGBEVF_MAX_NETWORK_HDR_LEN 511
4412 static netdev_features_t
4413 ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
4414 netdev_features_t features)
4415 {
4416 unsigned int network_hdr_len, mac_hdr_len;
4418 /* Make certain the headers can be described by a context descriptor */
4419 mac_hdr_len = skb_network_header(skb) - skb->data;
4420 if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
4421 return features & ~(NETIF_F_HW_CSUM |
4422 NETIF_F_SCTP_CRC |
4423 NETIF_F_HW_VLAN_CTAG_TX |
4424 NETIF_F_TSO |
4425 NETIF_F_TSO6);
4427 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
4428 if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
4429 return features & ~(NETIF_F_HW_CSUM |
4430 NETIF_F_SCTP_CRC |
4431 NETIF_F_TSO |
4432 NETIF_F_TSO6);
4434 /* We can only support IPV4 TSO in tunnels if we can mangle the
4435 * inner IP ID field, so strip TSO if MANGLEID is not supported.
4436 */
4437 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
4438 features &= ~NETIF_F_TSO;
4440 return features;
4441 }
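/* Editor's note: both limits exist because the header offsets must fit in a
 * single Tx context descriptor (MAC header up to 127 bytes, network header
 * up to 511 bytes). Oversized headers do not fail the transmit; the
 * checksum/TSO/VLAN offload bits are simply cleared so the stack falls back
 * to software for that skb.
 */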
4443 static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
4444 {
4445 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4446 struct ixgbevf_adapter *adapter = netdev_priv(dev);
4447 struct bpf_prog *old_prog;
4449 /* verify ixgbevf ring attributes are sufficient for XDP */
4450 for (i = 0; i < adapter->num_rx_queues; i++) {
4451 struct ixgbevf_ring *ring = adapter->rx_ring[i];
4453 if (frame_size > ixgbevf_rx_bufsz(ring))
4454 return -EINVAL;
4455 }
4457 old_prog = xchg(&adapter->xdp_prog, prog);
4459 /* If transitioning XDP modes reconfigure rings */
4460 if (!!prog != !!old_prog) {
4461 /* Hardware has to reinitialize queues and interrupts to
4462 * match packet buffer alignment. Unfortunately, the
4463 * hardware is not flexible enough to do this dynamically.
4464 */
4465 if (netif_running(dev))
4466 ixgbevf_close(dev);
4468 ixgbevf_clear_interrupt_scheme(adapter);
4469 ixgbevf_init_interrupt_scheme(adapter);
4471 if (netif_running(dev))
4472 ixgbevf_open(dev);
4473 } else {
4474 for (i = 0; i < adapter->num_rx_queues; i++)
4475 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
4476 }
4478 if (old_prog)
4479 bpf_prog_put(old_prog);
4481 return 0;
4482 }
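/* Editor's note: the program pointer is swapped with xchg(), so a full ring
 * reconfiguration is only needed when XDP is being turned on or off (the
 * buffer layout changes); replacing one loaded program with another just
 * republishes the new pointer to each Rx ring. As an illustration only (not
 * from this file), `ip link set dev <vf-interface> xdp obj <prog.o>`
 * reaches this path via ndo_bpf/XDP_SETUP_PROG.
 */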
4484 static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4485 {
4486 struct ixgbevf_adapter *adapter = netdev_priv(dev);
4488 switch (xdp->command) {
4489 case XDP_SETUP_PROG:
4490 return ixgbevf_xdp_setup(dev, xdp->prog);
4491 case XDP_QUERY_PROG:
4492 xdp->prog_id = adapter->xdp_prog ?
4493 adapter->xdp_prog->aux->id : 0;
4494 return 0;
4495 default:
4496 return -EINVAL;
4497 }
4498 }
4500 static const struct net_device_ops ixgbevf_netdev_ops = {
4501 .ndo_open = ixgbevf_open,
4502 .ndo_stop = ixgbevf_close,
4503 .ndo_start_xmit = ixgbevf_xmit_frame,
4504 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
4505 .ndo_get_stats64 = ixgbevf_get_stats,
4506 .ndo_validate_addr = eth_validate_addr,
4507 .ndo_set_mac_address = ixgbevf_set_mac,
4508 .ndo_change_mtu = ixgbevf_change_mtu,
4509 .ndo_tx_timeout = ixgbevf_tx_timeout,
4510 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
4511 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
4512 .ndo_features_check = ixgbevf_features_check,
4513 .ndo_bpf = ixgbevf_xdp,
4514 };
4516 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
4517 {
4518 dev->netdev_ops = &ixgbevf_netdev_ops;
4519 ixgbevf_set_ethtool_ops(dev);
4520 dev->watchdog_timeo = 5 * HZ;
4521 }
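/* Editor's note: watchdog_timeo feeds the netdev Tx watchdog; with 5 * HZ
 * the stack calls ixgbevf_tx_timeout() if a stopped Tx queue makes no
 * progress for roughly five seconds.
 */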
4523 /**
4524 * ixgbevf_probe - Device Initialization Routine
4525 * @pdev: PCI device information struct
4526 * @ent: entry in ixgbevf_pci_tbl
4527 *
4528 * Returns 0 on success, negative on failure
4529 *
4530 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
4531 * The OS initialization, configuring of the adapter private structure,
4532 * and a hardware reset occur.
4533 **/
4534 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4535 {
4536 struct net_device *netdev;
4537 struct ixgbevf_adapter *adapter = NULL;
4538 struct ixgbe_hw *hw = NULL;
4539 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
4540 int err, pci_using_dac;
4541 bool disable_dev = false;
4543 err = pci_enable_device(pdev);
4544 if (err)
4545 return err;
4547 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
4548 pci_using_dac = 1;
4549 } else {
4550 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4551 if (err) {
4552 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
4553 goto err_dma;
4554 }
4555 pci_using_dac = 0;
4556 }
4558 err = pci_request_regions(pdev, ixgbevf_driver_name);
4559 if (err) {
4560 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
4561 goto err_pci_reg;
4562 }
4564 pci_set_master(pdev);
4566 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
4567 MAX_TX_QUEUES);
4568 if (!netdev) {
4569 err = -ENOMEM;
4570 goto err_alloc_etherdev;
4571 }
4573 SET_NETDEV_DEV(netdev, &pdev->dev);
4575 adapter = netdev_priv(netdev);
4577 adapter->netdev = netdev;
4578 adapter->pdev = pdev;
4579 hw = &adapter->hw;
4580 hw->back = adapter;
4581 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4583 /* call save state here in standalone driver because it relies on
4584 * adapter struct to exist, and needs to call netdev_priv
4585 */
4586 pci_save_state(pdev);
4588 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4589 pci_resource_len(pdev, 0));
4590 adapter->io_addr = hw->hw_addr;
4591 if (!hw->hw_addr) {
4592 err = -EIO;
4593 goto err_ioremap;
4594 }
4596 ixgbevf_assign_netdev_ops(netdev);
4598 /* Setup HW API */
4599 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
4600 hw->mac.type = ii->mac;
4602 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
4603 sizeof(struct ixgbe_mbx_operations));
4605 /* setup the private structure */
4606 err = ixgbevf_sw_init(adapter);
4607 if (err)
4608 goto err_sw_init;
4610 /* The HW MAC address was set and/or determined in sw_init */
4611 if (!is_valid_ether_addr(netdev->dev_addr)) {
4612 pr_err("invalid MAC address\n");
4613 err = -EIO;
4614 goto err_sw_init;
4615 }
4617 netdev->hw_features = NETIF_F_SG |
4618 NETIF_F_TSO |
4619 NETIF_F_TSO6 |
4620 NETIF_F_RXCSUM |
4621 NETIF_F_HW_CSUM |
4622 NETIF_F_SCTP_CRC;
4624 #define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
4625 NETIF_F_GSO_GRE_CSUM | \
4626 NETIF_F_GSO_IPXIP4 | \
4627 NETIF_F_GSO_IPXIP6 | \
4628 NETIF_F_GSO_UDP_TUNNEL | \
4629 NETIF_F_GSO_UDP_TUNNEL_CSUM)
4631 netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
4632 netdev->hw_features |= NETIF_F_GSO_PARTIAL |
4633 IXGBEVF_GSO_PARTIAL_FEATURES;
4635 netdev->features = netdev->hw_features;
4637 if (pci_using_dac)
4638 netdev->features |= NETIF_F_HIGHDMA;
4640 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
4641 netdev->mpls_features |= NETIF_F_SG |
4642 NETIF_F_TSO |
4643 NETIF_F_TSO6 |
4644 NETIF_F_HW_CSUM;
4645 netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES;
4646 netdev->hw_enc_features |= netdev->vlan_features;
4648 /* set this bit last since it cannot be part of vlan_features */
4649 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4650 NETIF_F_HW_VLAN_CTAG_RX |
4651 NETIF_F_HW_VLAN_CTAG_TX;
4653 netdev->priv_flags |= IFF_UNICAST_FLT;
4655 /* MTU range: 68 - 1504 or 9710 */
4656 netdev->min_mtu = ETH_MIN_MTU;
4657 switch (adapter->hw.api_version) {
4658 case ixgbe_mbox_api_11:
4659 case ixgbe_mbox_api_12:
4660 case ixgbe_mbox_api_13:
4661 case ixgbe_mbox_api_14:
4662 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4663 (ETH_HLEN + ETH_FCS_LEN);
4664 break;
4665 default:
4666 if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
4667 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4668 (ETH_HLEN + ETH_FCS_LEN);
4669 else
4670 netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
4671 break;
4672 }
4674 if (IXGBE_REMOVED(hw->hw_addr)) {
4675 err = -EIO;
4676 goto err_sw_init;
4677 }
4679 timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0);
4681 INIT_WORK(&adapter->service_task, ixgbevf_service_task);
4682 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
4683 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
4685 err = ixgbevf_init_interrupt_scheme(adapter);
4686 if (err)
4687 goto err_sw_init;
4689 strcpy(netdev->name, "eth%d");
4691 err = register_netdev(netdev);
4692 if (err)
4693 goto err_register;
4695 pci_set_drvdata(pdev, netdev);
4696 netif_carrier_off(netdev);
4697 ixgbevf_init_ipsec_offload(adapter);
4699 ixgbevf_init_last_counter_stats(adapter);
4701 /* print the VF info */
4702 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
4703 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
4705 switch (hw->mac.type) {
4706 case ixgbe_mac_X550_vf:
4707 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
4708 break;
4709 case ixgbe_mac_X540_vf:
4710 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
4711 break;
4712 case ixgbe_mac_82599_vf:
4713 default:
4714 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
4715 break;
4716 }
4718 return 0;
4720 err_register:
4721 ixgbevf_clear_interrupt_scheme(adapter);
4722 err_sw_init:
4723 ixgbevf_reset_interrupt_capability(adapter);
4724 iounmap(adapter->io_addr);
4725 kfree(adapter->rss_key);
4726 err_ioremap:
4727 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4728 free_netdev(netdev);
4729 err_alloc_etherdev:
4730 pci_release_regions(pdev);
4731 err_pci_reg:
4732 err_dma:
4733 if (!adapter || disable_dev)
4734 pci_disable_device(pdev);
4735 return err;
4736 }
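/* Editor's note: the error labels above unwind in strict reverse order of
 * setup (interrupt scheme, ioremap/rss_key, netdev allocation, PCI regions,
 * then pci_disable_device() guarded by __IXGBEVF_DISABLED), which is why a
 * new setup step in probe needs a matching label inserted at the right
 * depth.
 */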
4738 /**
4739 * ixgbevf_remove - Device Removal Routine
4740 * @pdev: PCI device information struct
4741 *
4742 * ixgbevf_remove is called by the PCI subsystem to alert the driver
4743 * that it should release a PCI device. This could be caused by a
4744 * Hot-Plug event, or because the driver is going to be removed from
4745 * memory.
4746 **/
4747 static void ixgbevf_remove(struct pci_dev *pdev)
4748 {
4749 struct net_device *netdev = pci_get_drvdata(pdev);
4750 struct ixgbevf_adapter *adapter;
4751 bool disable_dev;
4753 if (!netdev)
4754 return;
4756 adapter = netdev_priv(netdev);
4758 set_bit(__IXGBEVF_REMOVING, &adapter->state);
4759 cancel_work_sync(&adapter->service_task);
4761 if (netdev->reg_state == NETREG_REGISTERED)
4762 unregister_netdev(netdev);
4764 ixgbevf_stop_ipsec_offload(adapter);
4765 ixgbevf_clear_interrupt_scheme(adapter);
4766 ixgbevf_reset_interrupt_capability(adapter);
4768 iounmap(adapter->io_addr);
4769 pci_release_regions(pdev);
4771 hw_dbg(&adapter->hw, "Remove complete\n");
4773 kfree(adapter->rss_key);
4774 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4775 free_netdev(netdev);
4777 if (disable_dev)
4778 pci_disable_device(pdev);
4779 }
4781 /**
4782 * ixgbevf_io_error_detected - called when PCI error is detected
4783 * @pdev: Pointer to PCI device
4784 * @state: The current pci connection state
4785 *
4786 * This function is called after a PCI bus error affecting
4787 * this device has been detected.
4788 **/
4789 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
4790 pci_channel_state_t state)
4791 {
4792 struct net_device *netdev = pci_get_drvdata(pdev);
4793 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4795 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
4796 return PCI_ERS_RESULT_DISCONNECT;
4798 rtnl_lock();
4799 netif_device_detach(netdev);
4801 if (netif_running(netdev))
4802 ixgbevf_close_suspend(adapter);
4804 if (state == pci_channel_io_perm_failure) {
4805 rtnl_unlock();
4806 return PCI_ERS_RESULT_DISCONNECT;
4807 }
4809 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4810 pci_disable_device(pdev);
4811 rtnl_unlock();
4813 /* Request a slot reset. */
4814 return PCI_ERS_RESULT_NEED_RESET;
4815 }
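/* Editor's note: in the PCI error-recovery (AER) flow this callback runs
 * first; returning PCI_ERS_RESULT_NEED_RESET asks the core to reset the
 * slot and then call ixgbevf_io_slot_reset() and, if that recovers,
 * ixgbevf_io_resume() below.
 */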
4817 /**
4818 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
4819 * @pdev: Pointer to PCI device
4820 *
4821 * Restart the card from scratch, as if from a cold-boot. Implementation
4822 * resembles the first-half of the ixgbevf_resume routine.
4823 **/
4824 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
4825 {
4826 struct net_device *netdev = pci_get_drvdata(pdev);
4827 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4829 if (pci_enable_device_mem(pdev)) {
4830 dev_err(&pdev->dev,
4831 "Cannot re-enable PCI device after reset.\n");
4832 return PCI_ERS_RESULT_DISCONNECT;
4833 }
4835 adapter->hw.hw_addr = adapter->io_addr;
4836 smp_mb__before_atomic();
4837 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4838 pci_set_master(pdev);
4840 ixgbevf_reset(adapter);
4842 return PCI_ERS_RESULT_RECOVERED;
4843 }
4845 /**
4846 * ixgbevf_io_resume - called when traffic can start flowing again.
4847 * @pdev: Pointer to PCI device
4848 *
4849 * This callback is called when the error recovery driver tells us that
4850 * it's OK to resume normal operation. Implementation resembles the
4851 * second-half of the ixgbevf_resume routine.
4852 **/
4853 static void ixgbevf_io_resume(struct pci_dev *pdev)
4854 {
4855 struct net_device *netdev = pci_get_drvdata(pdev);
4857 rtnl_lock();
4858 if (netif_running(netdev))
4859 ixgbevf_open(netdev);
4861 netif_device_attach(netdev);
4862 rtnl_unlock();
4863 }
4865 /* PCI Error Recovery (ERS) */
4866 static const struct pci_error_handlers ixgbevf_err_handler = {
4867 .error_detected = ixgbevf_io_error_detected,
4868 .slot_reset = ixgbevf_io_slot_reset,
4869 .resume = ixgbevf_io_resume,
4870 };
4872 static struct pci_driver ixgbevf_driver = {
4873 .name = ixgbevf_driver_name,
4874 .id_table = ixgbevf_pci_tbl,
4875 .probe = ixgbevf_probe,
4876 .remove = ixgbevf_remove,
4877 #ifdef CONFIG_PM
4878 /* Power Management Hooks */
4879 .suspend = ixgbevf_suspend,
4880 .resume = ixgbevf_resume,
4881 #endif
4882 .shutdown = ixgbevf_shutdown,
4883 .err_handler = &ixgbevf_err_handler
4884 };
4886 /**
4887 * ixgbevf_init_module - Driver Registration Routine
4888 *
4889 * ixgbevf_init_module is the first routine called when the driver is
4890 * loaded. All it does is register with the PCI subsystem.
4891 **/
4892 static int __init ixgbevf_init_module(void)
4893 {
4894 pr_info("%s - version %s\n", ixgbevf_driver_string,
4895 ixgbevf_driver_version);
4897 pr_info("%s\n", ixgbevf_copyright);
4898 ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
4899 if (!ixgbevf_wq) {
4900 pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name);
4901 return -ENOMEM;
4902 }
4904 return pci_register_driver(&ixgbevf_driver);
4905 }
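/* Editor's note: the dedicated single-threaded workqueue is created before
 * pci_register_driver() so that probe (and the service task it arms) always
 * has somewhere to queue work; module exit tears them down in the opposite
 * order.
 */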
4907 module_init(ixgbevf_init_module);
4909 /**
4910 * ixgbevf_exit_module - Driver Exit Cleanup Routine
4911 *
4912 * ixgbevf_exit_module is called just before the driver is removed
4913 * from memory.
4914 **/
4915 static void __exit ixgbevf_exit_module(void)
4916 {
4917 pci_unregister_driver(&ixgbevf_driver);
4918 if (ixgbevf_wq) {
4919 destroy_workqueue(ixgbevf_wq);
4920 ixgbevf_wq = NULL;
4921 }
4922 }
4924 #ifdef DEBUG
4925 /**
4926 * ixgbevf_get_hw_dev_name - return device name string
4927 * used by hardware layer to print debugging information
4928 * @hw: pointer to private hardware struct
4929 **/
4930 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
4931 {
4932 struct ixgbevf_adapter *adapter = hw->back;
4934 return adapter->netdev->name;
4935 }
4937 #endif
4938 module_exit(ixgbevf_exit_module);
4940 /* ixgbevf_main.c */