// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"
static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";
/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;
int iavf_status_to_errno(enum iavf_status status)
{
	switch (status) {
	case IAVF_SUCCESS:
		return 0;
	case IAVF_ERR_PARAM:
	case IAVF_ERR_MAC_TYPE:
	case IAVF_ERR_INVALID_MAC_ADDR:
	case IAVF_ERR_INVALID_LINK_SETTINGS:
	case IAVF_ERR_INVALID_PD_ID:
	case IAVF_ERR_INVALID_QP_ID:
	case IAVF_ERR_INVALID_CQ_ID:
	case IAVF_ERR_INVALID_CEQ_ID:
	case IAVF_ERR_INVALID_AEQ_ID:
	case IAVF_ERR_INVALID_SIZE:
	case IAVF_ERR_INVALID_ARP_INDEX:
	case IAVF_ERR_INVALID_FPM_FUNC_ID:
	case IAVF_ERR_QP_INVALID_MSG_SIZE:
	case IAVF_ERR_INVALID_FRAG_COUNT:
	case IAVF_ERR_INVALID_ALIGNMENT:
	case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
	case IAVF_ERR_INVALID_IMM_DATA_SIZE:
	case IAVF_ERR_INVALID_VF_ID:
	case IAVF_ERR_INVALID_HMCFN_ID:
	case IAVF_ERR_INVALID_PBLE_INDEX:
	case IAVF_ERR_INVALID_SD_INDEX:
	case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
	case IAVF_ERR_INVALID_SD_TYPE:
	case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
	case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
	case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
		return -EINVAL;
	case IAVF_ERR_NVM:
	case IAVF_ERR_NVM_CHECKSUM:
	case IAVF_ERR_PHY:
	case IAVF_ERR_CONFIG:
	case IAVF_ERR_UNKNOWN_PHY:
	case IAVF_ERR_LINK_SETUP:
	case IAVF_ERR_ADAPTER_STOPPED:
	case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
	case IAVF_ERR_AUTONEG_NOT_COMPLETE:
	case IAVF_ERR_RESET_FAILED:
	case IAVF_ERR_BAD_PTR:
	case IAVF_ERR_SWFW_SYNC:
	case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
	case IAVF_ERR_QUEUE_EMPTY:
	case IAVF_ERR_FLUSHED_QUEUE:
	case IAVF_ERR_OPCODE_MISMATCH:
	case IAVF_ERR_CQP_COMPL_ERROR:
	case IAVF_ERR_BACKING_PAGE_ERROR:
	case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
	case IAVF_ERR_MEMCPY_FAILED:
	case IAVF_ERR_SRQ_ENABLED:
	case IAVF_ERR_ADMIN_QUEUE_ERROR:
	case IAVF_ERR_ADMIN_QUEUE_FULL:
	case IAVF_ERR_BAD_IWARP_CQE:
	case IAVF_ERR_NVM_BLANK_MODE:
	case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
	case IAVF_ERR_DIAG_TEST_FAILED:
	case IAVF_ERR_FIRMWARE_API_VERSION:
	case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
		return -EIO;
	case IAVF_ERR_DEVICE_NOT_SUPPORTED:
		return -ENODEV;
	case IAVF_ERR_NO_AVAILABLE_VSI:
	case IAVF_ERR_RING_FULL:
		return -ENOSPC;
	case IAVF_ERR_NO_MEMORY:
		return -ENOMEM;
	case IAVF_ERR_TIMEOUT:
	case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
		return -ETIMEDOUT;
	case IAVF_ERR_NOT_IMPLEMENTED:
	case IAVF_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
		return -EALREADY;
	case IAVF_ERR_NOT_READY:
		return -EBUSY;
	case IAVF_ERR_BUF_TOO_SHORT:
		return -EMSGSIZE;
	}

	return -EIO;
}
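/* Illustrative use (editorial, not part of the original file): callers
 * translate shared-code status into a kernel errno at the driver
 * boundary, e.g.
 *
 *	status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
 *	if (status)
 *		return iavf_status_to_errno(status);
 */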
int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
{
	switch (v_status) {
	case VIRTCHNL_STATUS_SUCCESS:
		return 0;
	case VIRTCHNL_STATUS_ERR_PARAM:
	case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
		return -EINVAL;
	case VIRTCHNL_STATUS_ERR_NO_MEMORY:
		return -ENOMEM;
	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
	case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
		return -EIO;
	case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	}

	return -EIO;
}
/**
 * iavf_pdev_to_adapter - go from pci_dev to adapter
 * @pdev: pci_dev pointer
 **/
static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
{
	return netdev_priv(pci_get_drvdata(pdev));
}
/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
					 struct iavf_dma_mem *mem,
					 u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}
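/* Editorial note: the shared code appears to store the bus address as a
 * plain u64 rather than a dma_addr_t (inferred from the casts here and
 * in the free path below), which is why both directions cast through
 * (dma_addr_t *) / (dma_addr_t).
 */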
/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
				     struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return IAVF_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}
/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
					  struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}
/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
				      struct iavf_virt_mem *mem)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}
/**
 * iavf_lock_timeout - try to lock mutex but give up after timeout
 * @lock: mutex that should be locked
 * @msecs: timeout in msecs
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
{
	unsigned int wait, delay = 10;

	for (wait = 0; wait < msecs; wait += delay) {
		if (mutex_trylock(lock))
			return 0;

		msleep(delay);
	}

	return -1;
}
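/* Illustrative caller pattern (editorial, not part of the original file):
 *
 *	if (iavf_lock_timeout(&adapter->crit_lock, 5000))
 *		dev_warn(&adapter->pdev->dev,
 *			 "failed to acquire crit_lock\n");
 *
 * i.e. give up after ~5 seconds instead of blocking forever.
 */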
/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
	if (!(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		queue_work(iavf_wq, &adapter->reset_task);
	}
}
/**
 * iavf_schedule_request_stats - Set the flags and schedule statistics request
 * @adapter: board private structure
 *
 * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
 * request and refresh ethtool stats
 **/
void iavf_schedule_request_stats(struct iavf_adapter *adapter)
{
	adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}
/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter);
}
/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	/* read flush has the side effect of flushing the write */
	rd32(hw, IAVF_VFGEN_RSTAT);
	synchronize_irq(adapter->msix_entries[0].vector);
}
/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}
/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	iavf_flush(hw);
}
/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
			     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
			     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
	}
}
/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter, ~0);

	if (flush)
		iavf_flush(hw);
}
/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	if (adapter->state != __IAVF_REMOVE)
		/* schedule work on the private workqueue */
		queue_work(iavf_wq, &adapter->adminq_task);

	return IRQ_HANDLED;
}
/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct iavf_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct iavf_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
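	/* Editorial note: the hardware ITR registers have 2 usec
	 * resolution while the driver tracks ITR in 1 usec units, hence
	 * the >> 1 on the write below (consistent with other Intel
	 * Ethernet drivers).
	 */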
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr >> 1);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}
/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}
/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible. You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		iavf_map_vector_to_rxq(adapter, vidx, ridx);
		iavf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}
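/* Worked example (editorial): with 8 active queues and 3 queue vectors,
 * the round-robin above maps queue pairs {0,3,6} to vector 0, {1,4,7}
 * to vector 1 and {2,5} to vector 2.
 */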
/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}
/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}
/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	iavf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%u", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%u", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%u", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_update_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_update_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}
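/* Example (editorial): for basename "ens1f0" with combined Tx/Rx rings,
 * the vectors requested above appear in /proc/interrupts as
 * iavf-ens1f0-TxRx-0, iavf-ens1f0-TxRx-1, ...
 */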
/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &iavf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}
/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_update_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}
/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}
/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}
/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
	struct iavf_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* For jumbo frames on systems with 4K pages we have to use
		 * an order 1 page, so we might as well increase the size
		 * of our Rx buffer to make better use of the available space
		 */
		rx_buf_len = IAVF_RXBUFFER_3072;

		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu. On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		if (adapter->flags & IAVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}
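/* Summary of the selection above (editorial, 4K-page systems): legacy Rx
 * always uses 2048-byte buffers; build_skb mode uses 3072 bytes so jumbo
 * frames fit an order-1 page, or 1536 - NET_IP_ALIGN when the MTU is at
 * most ETH_DATA_LEN.
 */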
/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
				 struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->vlan.vid == vlan.vid &&
		    f->vlan.tpid == vlan.tpid)
			return f;
	}

	return NULL;
}
/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
				struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		list_add_tail(&f->list, &adapter->vlan_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}
/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
 * iavf_restore_filters
 * @adapter: board private structure
 *
 * Restore existing non MAC filters when VF netdev comes back up
 **/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
	u16 vid;

	/* re-add all VLAN filters */
	for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
		iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));

	for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
		iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
}
/**
 * iavf_get_num_vlans_added - get number of VLANs added
 * @adapter: board private structure
 **/
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
	return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
		bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
}
/**
 * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
 * @adapter: board private structure
 *
 * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
 * do not impose a limit as that maintains current behavior and for
 * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF.
 **/
static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
{
	/* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
	 * never been a limit on the VF driver side
	 */
	if (VLAN_ALLOWED(adapter))
		return VLAN_N_VID;
	else if (VLAN_V2_ALLOWED(adapter))
		return adapter->vlan_v2_caps.filtering.max_filters;

	return 0;
}
/**
 * iavf_max_vlans_added - check if maximum VLANs allowed already exist
 * @adapter: board private structure
 **/
static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
{
	if (iavf_get_num_vlans_added(adapter) <
	    iavf_get_max_vlans_allowed(adapter))
		return false;

	return true;
}
/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (!VLAN_FILTERING_ALLOWED(adapter))
		return -EIO;

	if (iavf_max_vlans_added(adapter)) {
		netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
			   iavf_get_max_vlans_allowed(adapter));
		return -EIO;
	}

	if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
		return -ENOMEM;

	return 0;
}
/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
	if (proto == cpu_to_be16(ETH_P_8021Q))
		clear_bit(vid, adapter->vsi.active_cvlans);
	else
		clear_bit(vid, adapter->vsi.active_svlans);

	return 0;
}
/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
				  const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}
/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
					const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = iavf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		f->add_handled = false;
		f->is_new_mac = true;
		f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	return f;
}
/**
 * iavf_replace_primary_mac - Replace current primary address
 * @adapter: board private structure
 * @new_mac: new MAC address to be applied
 *
 * Replace current dev_addr and send request to PF for removal of previous
 * primary MAC address filter and addition of new primary MAC filter.
 * Return 0 for success, -ENOMEM for failure.
 *
 * Do not call this with mac_vlan_list_lock!
 **/
int iavf_replace_primary_mac(struct iavf_adapter *adapter,
			     const u8 *new_mac)
{
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->is_primary = false;
	}

	f = iavf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = iavf_add_filter(adapter, new_mac);

	if (f) {
		/* Always send the request to add if changing primary MAC
		 * even if filter is already present on the list
		 */
		f->is_primary = true;
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
		ether_addr_copy(hw->mac.addr, new_mac);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* schedule the watchdog task to immediately process the request */
	if (f) {
		queue_work(iavf_wq, &adapter->watchdog_task.work);
		return 0;
	}
	return -ENOMEM;
}
/**
 * iavf_is_mac_set_handled - wait for a response to set MAC from PF
 * @netdev: network interface device structure
 * @macaddr: MAC address to set
 *
 * Returns true on success, false on failure
 **/
static bool iavf_is_mac_set_handled(struct net_device *netdev,
				    const u8 *macaddr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;
	bool ret = false;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_filter(adapter, macaddr);

	if (!f || (!f->add && f->add_handled))
		ret = true;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	return ret;
}
/**
 * iavf_set_mac - NDO callback to set port MAC address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_set_mac(struct net_device *netdev, void *p)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int ret;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = iavf_replace_primary_mac(adapter, addr->sa_data);

	if (ret)
		return ret;

	/* If this is an initial set MAC during VF spawn do not wait */
	if (adapter->flags & IAVF_FLAG_INITIAL_MAC_SET) {
		adapter->flags &= ~IAVF_FLAG_INITIAL_MAC_SET;
		return 0;
	}

	ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
					       iavf_is_mac_set_handled(netdev, addr->sa_data),
					       msecs_to_jiffies(2500));

	/* If ret < 0 then it means wait was interrupted.
	 * If ret == 0 then it means we got a timeout.
	 * else it means we got response for set MAC from PF,
	 * check if netdev MAC was updated to requested MAC,
	 * if yes then set MAC succeeded otherwise it failed return -EACCES
	 */
	if (ret < 0)
		return ret;

	if (!ret)
		return -EAGAIN;

	if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return -EACCES;

	return 0;
}
/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (iavf_add_filter(adapter, addr))
		return 0;
	else
		return -ENOMEM;
}
/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = iavf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	return 0;
}
/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & IAVF_FLAG_PROMISC_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}
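/* Illustrative transition (editorial): "ip link set dev <if> promisc on"
 * sets IFF_PROMISC while IAVF_FLAG_PROMISC_ON is still clear, so the
 * first branch above queues IAVF_FLAG_AQ_REQUEST_PROMISC for the
 * watchdog to forward to the PF.
 */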
/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}
/**
 * iavf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		napi_disable(&q_vector->napi);
	}
}
/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void iavf_configure(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	iavf_set_rx_mode(netdev);

	iavf_configure_tx(adapter);
	iavf_configure_rx(adapter);
	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *ring = &adapter->rx_rings[i];

		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
	}
}
/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
	iavf_change_state(adapter, __IAVF_RUNNING);
	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_napi_enable_all(adapter);

	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}
/**
 * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF
 * yet and mark other to be removed.
 * @adapter: board private structure
 **/
static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
{
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_mac_filter *f, *ftmp;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* clear the sync flag on all filters */
	__dev_uc_unsync(adapter->netdev, NULL);
	__dev_mc_unsync(adapter->netdev, NULL);

	/* remove all MAC filters */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
				 list) {
		if (f->add) {
			list_del(&f->list);
			kfree(f);
		} else {
			f->remove = true;
		}
	}

	/* remove all VLAN filters */
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list) {
		if (vlf->add) {
			list_del(&vlf->list);
			kfree(vlf);
		} else {
			vlf->remove = true;
		}
	}
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
 * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and
 * mark other to be removed.
 * @adapter: board private structure
 **/
static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
{
	struct iavf_cloud_filter *cf, *cftmp;

	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
				 list) {
		if (cf->add) {
			list_del(&cf->list);
			kfree(cf);
			adapter->num_cloud_filters--;
		} else {
			cf->del = true;
		}
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
}
/**
 * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark
 * other to be removed.
 * @adapter: board private structure
 **/
static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
{
	struct iavf_fdir_fltr *fdir, *fdirtmp;

	/* remove all Flow Director filters */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
				 list) {
		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
			list_del(&fdir->list);
			kfree(fdir);
			adapter->fdir_active_fltr--;
		} else {
			fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
		}
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);
}
/**
 * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark
 * other to be removed.
 * @adapter: board private structure
 **/
static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
{
	struct iavf_adv_rss *rss, *rsstmp;

	/* remove all advance RSS configuration */
	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
				 list) {
		if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
			list_del(&rss->list);
			kfree(rss);
		} else {
			rss->state = IAVF_ADV_RSS_DEL_REQUEST;
		}
	}
	spin_unlock_bh(&adapter->adv_rss_lock);
}
/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	iavf_napi_disable_all(adapter);
	iavf_irq_disable(adapter);

	iavf_clear_mac_vlan_filters(adapter);
	iavf_clear_cloud_filters(adapter);
	iavf_clear_fdir_filters(adapter);
	iavf_clear_adv_rss_conf(adapter);

	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		if (!list_empty(&adapter->mac_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
		if (!list_empty(&adapter->vlan_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		if (!list_empty(&adapter->cloud_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		if (!list_empty(&adapter->fdir_list_head))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
		if (!list_empty(&adapter->adv_rss_list_head))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
	}

	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}
/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}
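/* Example (editorial): asking for 5 vectors with a threshold of 3 lets
 * pci_enable_msix_range() grant anything in [3, 5]; the grant is stored
 * in num_msix_vectors and later split as one misc vector plus
 * (num_msix_vectors - NONQ_VECS) queue vectors.
 */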
/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void iavf_free_queues(struct iavf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	adapter->num_active_queues = 0;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}
/**
 * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
 * @adapter: board private structure
 *
 * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
 * stripped in certain descriptor fields. Instead of checking the offload
 * capability bits in the hot path, cache the location in the ring specific
 * flags.
 **/
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *tx_ring = &adapter->tx_rings[i];
		struct iavf_ring *rx_ring = &adapter->rx_rings[i];

		/* prevent multiple L2TAG bits being set after VFR */
		tx_ring->flags &=
			~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
			  IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
		rx_ring->flags &=
			~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
			  IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);

		if (VLAN_ALLOWED(adapter)) {
			tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
			rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
		} else if (VLAN_V2_ALLOWED(adapter)) {
			struct virtchnl_vlan_supported_caps *stripping_support;
			struct virtchnl_vlan_supported_caps *insertion_support;

			stripping_support =
				&adapter->vlan_v2_caps.offloads.stripping_support;
			insertion_support =
				&adapter->vlan_v2_caps.offloads.insertion_support;

			if (stripping_support->outer) {
				if (stripping_support->outer &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					rx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (stripping_support->outer &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
					rx_ring->flags |=
						IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
			} else if (stripping_support->inner) {
				if (stripping_support->inner &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					rx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (stripping_support->inner &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
					rx_ring->flags |=
						IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
			}

			if (insertion_support->outer) {
				if (insertion_support->outer &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					tx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (insertion_support->outer &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
					tx_ring->flags |=
						IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
			} else if (insertion_support->inner) {
				if (insertion_support->inner &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					tx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (insertion_support->inner &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
					tx_ring->flags |=
						IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
			}
		}
	}
}
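/* Example (editorial): under VLAN_V2 with outer stripping reported in
 * L2TAG2_2, only IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 is set above, so
 * the Rx hot path pulls the tag from the second descriptor tag field
 * without re-checking capability bits.
 */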
/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're in reset reallocating queues we don't actually know yet for
	 * certain the PF gave us the number of queues we asked for but we'll
	 * assume it did. Once basic reset is finished we'll confirm once we
	 * start negotiating config with PF.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));

	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct iavf_ring *tx_ring;
		struct iavf_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	iavf_set_queue_vlan_tag_loc(adapter);

	return 0;

err_out:
	iavf_free_queues(adapter);
	return -ENOMEM;
}
/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
	 * us much good if we have more vectors than CPUs. However, we already
	 * limit the total number of queues by the number of CPUs so we do not
	 * need any further limiting here.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = iavf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);

	return err;
}
/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
	struct iavf_aqc_get_set_rss_key_data *rss_key =
		(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct iavf_hw *hw = &adapter->hw;
	enum iavf_status status;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (status) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			iavf_stat_str(hw, status),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return iavf_status_to_errno(status);
	}

	status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				     adapter->rss_lut, adapter->rss_lut_size);
	if (status) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			iavf_stat_str(hw, status),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return iavf_status_to_errno(status);
	}

	return 0;
}
/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

	iavf_flush(hw);

	return 0;
}
/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_config_rss(struct iavf_adapter *adapter)
{
	if (RSS_PF(adapter)) {
		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
					IAVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return iavf_config_rss_aq(adapter);
	} else {
		return iavf_config_rss_reg(adapter);
	}
}
/**
 * iavf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}
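/* Worked example (editorial): with rss_lut_size = 64 and 4 active
 * queues, the LUT above becomes 0,1,2,3,0,1,2,3,... so hashed flows
 * spread evenly across all queues.
 */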
/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_init_rss(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = IAVF_DEFAULT_RSS_HENA;

		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	iavf_fill_rss_lut(adapter);
	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);

	return iavf_config_rss(adapter);
}
/**
 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct iavf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		q_vector->reg_idx = q_idx;
		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       iavf_napi_poll);
	}

	return 0;
}
/**
 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	napi_vectors = adapter->num_active_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];

		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}
/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 **/
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 **/
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
	int err;

	err = iavf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = iavf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* If we've made it so far while ADq flag being ON, then we haven't
	 * bailed out anywhere in middle. And ADq isn't just enabled but actual
	 * resources have been allocated in the reset path.
	 * Now we can truly claim that ADq is enabled.
	 */
	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc)
		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
			 adapter->num_tc);

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);

	return 0;
err_alloc_q_vectors:
	iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
	iavf_free_queues(adapter);
err_alloc_queues:
	return err;
}
/**
 * iavf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void iavf_free_rss(struct iavf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}
/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev))
		iavf_free_traffic_irqs(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);
	iavf_free_queues(adapter);

	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err;

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_map_rings_to_vectors(adapter);
err:
	return err;
}
/**
 * iavf_process_aq_command - process aq_required flags
 * and sends aq command
 * @adapter: pointer to iavf adapter structure
 *
 * Returns 0 on success
 * Returns error code if no command was sent
 * or error code if the command failed.
 **/
static int iavf_process_aq_command(struct iavf_adapter *adapter)
{
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
		return iavf_send_vf_config_msg(adapter);
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
		return iavf_send_vf_offload_vlan_v2_msg(adapter);
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
		iavf_disable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
		iavf_map_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
		iavf_add_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
		iavf_add_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
		iavf_del_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
		iavf_del_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
		iavf_configure_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
		iavf_enable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
		iavf_get_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
		iavf_set_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
		iavf_set_rss_key(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
		iavf_set_rss_lut(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				     FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}
	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		iavf_set_promiscuous(adapter, 0);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
		iavf_enable_channels(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
		iavf_disable_channels(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		iavf_add_cloud_filter(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		iavf_del_cloud_filter(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
		iavf_add_fdir_filter(adapter);
		return IAVF_SUCCESS;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
		iavf_del_fdir_filter(adapter);
		return IAVF_SUCCESS;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
		iavf_add_adv_rss_cfg(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
		iavf_del_adv_rss_cfg(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
		iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
		iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
		iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
		iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
		iavf_request_stats(adapter);
		return 0;
	}

	return -EAGAIN;
}
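/* Editorial note: at most one aq_required flag is serviced per call; the
 * watchdog keeps invoking this function until it returns -EAGAIN, which
 * signals that no command remains to be sent.
 */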
2177 * iavf_set_vlan_offload_features - set VLAN offload configuration
2178 * @adapter: board private structure
2179 * @prev_features: previous features used for comparison
2180 * @features: updated features used for configuration
2182 * Set the aq_required bit(s) based on the requested features passed in to
2183 * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
2184 * the watchdog if any changes are requested to expedite the request via virtchnl.
2188 iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
2189 netdev_features_t prev_features,
2190 netdev_features_t features)
2192 bool enable_stripping = true, enable_insertion = true;
2193 u16 vlan_ethertype = 0;
2194 u64 aq_required = 0;
2196 /* keep cases separate because one ethertype for offloads can be
2197 * disabled at the same time as another is being enabled, so check for an
2198 * enabled ethertype first, then check for disabled. Default to
2199 * ETH_P_8021Q so an ethertype is specified if disabling insertion and
2202 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2203 vlan_ethertype = ETH_P_8021AD;
2204 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2205 vlan_ethertype = ETH_P_8021Q;
2206 else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2207 vlan_ethertype = ETH_P_8021AD;
2208 else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2209 vlan_ethertype = ETH_P_8021Q;
2211 vlan_ethertype = ETH_P_8021Q;
2213 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
2214 enable_stripping = false;
2215 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
2216 enable_insertion = false;
2218 if (VLAN_ALLOWED(adapter)) {
2219 /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
2220 * stripping via virtchnl. VLAN insertion can be toggled on the
2221 * netdev, but it doesn't require a virtchnl message
2223 if (enable_stripping)
2224 aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
2226 aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
2228 } else if (VLAN_V2_ALLOWED(adapter)) {
2229 switch (vlan_ethertype) {
2231 if (enable_stripping)
2232 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
2234 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
2236 if (enable_insertion)
2237 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
2239 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
2242 if (enable_stripping)
2243 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
2245 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
2247 if (enable_insertion)
2248 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
2250 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
2256 adapter->aq_required |= aq_required;
2257 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
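/* Illustrative example (editor's sketch): turning every VLAN offload off,
 * e.g. via "ethtool -K <vf> rxvlan off txvlan off", leaves no ethertype
 * in @features, so the lookup above falls back to @prev_features (and
 * ultimately ETH_P_8021Q); enable_stripping and enable_insertion both go
 * false and the matching IAVF_FLAG_AQ_DISABLE_*_VLAN_* bits are queued
 * for the watchdog to send.
 */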
2262 * iavf_startup - first step of driver startup
2263 * @adapter: board private structure
2265 * Function processes the __IAVF_STARTUP driver state.
2266 * On success the state is changed to __IAVF_INIT_VERSION_CHECK;
2267 * on failure the state is changed to __IAVF_INIT_FAILED.
2269 static void iavf_startup(struct iavf_adapter *adapter)
2271 struct pci_dev *pdev = adapter->pdev;
2272 struct iavf_hw *hw = &adapter->hw;
2273 enum iavf_status status;
2276 WARN_ON(adapter->state != __IAVF_STARTUP);
2278 /* driver loaded, probe complete */
2279 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2280 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2281 status = iavf_set_mac_type(hw);
2283 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
2287 ret = iavf_check_reset_complete(hw);
2289 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
2293 hw->aq.num_arq_entries = IAVF_AQ_LEN;
2294 hw->aq.num_asq_entries = IAVF_AQ_LEN;
2295 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2296 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2298 status = iavf_init_adminq(hw);
2300 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
2304 ret = iavf_send_api_ver(adapter);
2306 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
2307 iavf_shutdown_adminq(hw);
2310 iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
2313 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2317 * iavf_init_version_check - second step of driver startup
2318 * @adapter: board private structure
2320 * Function processes the __IAVF_INIT_VERSION_CHECK driver state.
2321 * On success the state is changed to __IAVF_INIT_GET_RESOURCES;
2322 * on failure the state is changed to __IAVF_INIT_FAILED.
2324 static void iavf_init_version_check(struct iavf_adapter *adapter)
2326 struct pci_dev *pdev = adapter->pdev;
2327 struct iavf_hw *hw = &adapter->hw;
2330 WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
2332 if (!iavf_asq_done(hw)) {
2333 dev_err(&pdev->dev, "Admin queue command never completed\n");
2334 iavf_shutdown_adminq(hw);
2335 iavf_change_state(adapter, __IAVF_STARTUP);
2339 /* aq msg sent, awaiting reply */
2340 err = iavf_verify_api_ver(adapter);
2342 if (err == -EALREADY)
2343 err = iavf_send_api_ver(adapter);
2345 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
2346 adapter->pf_version.major,
2347 adapter->pf_version.minor,
2348 VIRTCHNL_VERSION_MAJOR,
2349 VIRTCHNL_VERSION_MINOR);
2352 err = iavf_send_vf_config_msg(adapter);
2354 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
2358 iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
2361 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2365 * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
2366 * @adapter: board private structure
2368 int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
2370 int i, num_req_queues = adapter->num_req_queues;
2371 struct iavf_vsi *vsi = &adapter->vsi;
2373 for (i = 0; i < adapter->vf_res->num_vsis; i++) {
2374 if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
2375 adapter->vsi_res = &adapter->vf_res->vsi_res[i];
2377 if (!adapter->vsi_res) {
2378 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
2382 if (num_req_queues &&
2383 num_req_queues > adapter->vsi_res->num_queue_pairs) {
2384 /* Problem. The PF gave us fewer queues than what we had
2385 * negotiated in our request. Need a reset to see if we can
2386 * get back to a working state.
2388 dev_err(&adapter->pdev->dev,
2389 "Requested %d queues, but PF only gave us %d.\n",
2391 adapter->vsi_res->num_queue_pairs);
2392 adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
2393 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
2394 iavf_schedule_reset(adapter);
2398 adapter->num_req_queues = 0;
2399 adapter->vsi.id = adapter->vsi_res->vsi_id;
2401 adapter->vsi.back = adapter;
2402 adapter->vsi.base_vector = 1;
2403 vsi->netdev = adapter->netdev;
2404 vsi->qs_handle = adapter->vsi_res->qset_handle;
2405 if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2406 adapter->rss_key_size = adapter->vf_res->rss_key_size;
2407 adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
2409 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
2410 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
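/* Editor's note on the fallback above: a PF that negotiated
 * VIRTCHNL_VF_OFFLOAD_RSS_PF dictates its own rss_key_size/rss_lut_size;
 * otherwise the VF uses the local IAVF_HKEY_ARRAY_SIZE and
 * IAVF_HLUT_ARRAY_SIZE defaults. Either way, these sizes feed the
 * kzalloc() of rss_key and rss_lut in iavf_init_config_adapter().
 */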
2417 * iavf_init_get_resources - third step of driver startup
2418 * @adapter: board private structure
2420 * Function processes the __IAVF_INIT_GET_RESOURCES driver state and
2421 * finishes the driver initialization procedure.
2422 * On success the state is changed to __IAVF_DOWN;
2423 * on failure the state is changed to __IAVF_INIT_FAILED.
2425 static void iavf_init_get_resources(struct iavf_adapter *adapter)
2427 struct pci_dev *pdev = adapter->pdev;
2428 struct iavf_hw *hw = &adapter->hw;
2431 WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
2432 /* aq msg sent, awaiting reply */
2433 if (!adapter->vf_res) {
2434 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
2436 if (!adapter->vf_res) {
2441 err = iavf_get_vf_config(adapter);
2442 if (err == -EALREADY) {
2443 err = iavf_send_vf_config_msg(adapter);
2445 } else if (err == -EINVAL) {
2446 /* We only get -EINVAL if the device is in a very bad
2447 * state or if we've been disabled for previous bad
2448 * behavior. Either way, we're done now.
2450 iavf_shutdown_adminq(hw);
2451 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
2455 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
2459 err = iavf_parse_vf_resource_msg(adapter);
2461 dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
2465 /* Some features require additional messages to negotiate extended
2466 * capabilities. These are processed in sequence by the
2467 * __IAVF_INIT_EXTENDED_CAPS driver state.
2469 adapter->extended_caps = IAVF_EXTENDED_CAPS;
2471 iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
2475 kfree(adapter->vf_res);
2476 adapter->vf_res = NULL;
2478 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2482 * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2483 * @adapter: board private structure
2485 * Function handles sending the extended VLAN V2 capability message to the
2486 * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
2487 * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2489 static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2493 WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
2495 ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
2496 if (ret == -EOPNOTSUPP) {
2497 /* PF does not support VIRTCHNL_VF_OFFLOAD_VLAN_V2. In this case,
2498 * we did not send the capability exchange message and do not
2499 * expect a response.
2501 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2504 /* We sent the message, so move on to the next step */
2505 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2509 * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2510 * @adapter: board private structure
2512 * Function processes receipt of the extended VLAN V2 capability message from the PF.
2515 static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2519 WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
2521 memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
2523 ret = iavf_get_vf_vlan_v2_caps(adapter);
2527 /* We've processed receipt of the VLAN V2 caps message */
2528 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2531 /* We didn't receive a reply. Make sure we try sending again when
2532 * __IAVF_INIT_FAILED attempts to recover.
2534 adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2535 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2539 * iavf_init_process_extended_caps - Part of driver startup
2540 * @adapter: board private structure
2542 * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
2543 * handles negotiating capabilities for features which require an additional message exchange.
2546 * Once all extended capabilities exchanges are finished, the driver will
2547 * transition into __IAVF_INIT_CONFIG_ADAPTER.
2549 static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
2551 WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);
2553 /* Process capability exchange for VLAN V2 */
2554 if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
2555 iavf_init_send_offload_vlan_v2_caps(adapter);
2557 } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
2558 iavf_init_recv_offload_vlan_v2_caps(adapter);
2562 /* When we reach here, no further extended capabilities exchanges are
2563 * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
2565 iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
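/* Editor's sketch of the handshake above: each extended capability
 * consumes a SEND bit and then a RECV bit, with the watchdog re-entering
 * this function between steps, e.g. for VLAN V2:
 *
 *	IAVF_EXTENDED_CAP_SEND_VLAN_V2 -> request sent, bit cleared
 *	IAVF_EXTENDED_CAP_RECV_VLAN_V2 -> reply parsed, bit cleared
 *
 * Only when no bits remain does the driver move on to
 * __IAVF_INIT_CONFIG_ADAPTER.
 */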
2569 * iavf_init_config_adapter - last part of driver startup
2570 * @adapter: board private structure
2572 * After all the supported capabilities are negotiated, then the
2573 * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
2575 static void iavf_init_config_adapter(struct iavf_adapter *adapter)
2577 struct net_device *netdev = adapter->netdev;
2578 struct pci_dev *pdev = adapter->pdev;
2581 WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);
2583 if (iavf_process_config(adapter))
2586 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2588 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
2590 netdev->netdev_ops = &iavf_netdev_ops;
2591 iavf_set_ethtool_ops(netdev);
2592 netdev->watchdog_timeo = 5 * HZ;
2594 /* MTU range: 68 - 9710 */
2595 netdev->min_mtu = ETH_MIN_MTU;
2596 netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
2598 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
2599 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
2600 adapter->hw.mac.addr);
2601 eth_hw_addr_random(netdev);
2602 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2604 eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2605 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2608 adapter->flags |= IAVF_FLAG_INITIAL_MAC_SET;
2610 adapter->tx_desc_count = IAVF_DEFAULT_TXD;
2611 adapter->rx_desc_count = IAVF_DEFAULT_RXD;
2612 err = iavf_init_interrupt_scheme(adapter);
2615 iavf_map_rings_to_vectors(adapter);
2616 if (adapter->vf_res->vf_cap_flags &
2617 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2618 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
2620 err = iavf_request_misc_irq(adapter);
2624 netif_carrier_off(netdev);
2625 adapter->link_up = false;
2627 /* set the semaphore to prevent any callbacks after device registration
2628 * up to the time when the driver state is set to __IAVF_DOWN
2631 if (!adapter->netdev_registered) {
2632 err = register_netdevice(netdev);
2639 adapter->netdev_registered = true;
2641 netif_tx_stop_all_queues(netdev);
2642 if (CLIENT_ALLOWED(adapter)) {
2643 err = iavf_lan_add_device(adapter);
2645 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
2648 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2649 if (netdev->features & NETIF_F_GRO)
2650 dev_info(&pdev->dev, "GRO is enabled\n");
2652 iavf_change_state(adapter, __IAVF_DOWN);
2653 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2656 iavf_misc_irq_enable(adapter);
2657 wake_up(&adapter->down_waitqueue);
2659 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
2660 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
2661 if (!adapter->rss_key || !adapter->rss_lut) {
2665 if (RSS_AQ(adapter))
2666 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2668 iavf_init_rss(adapter);
2670 if (VLAN_V2_ALLOWED(adapter))
2671 /* request initial VLAN offload settings */
2672 iavf_set_vlan_offload_features(adapter, 0, netdev->features);
2676 iavf_free_rss(adapter);
2678 iavf_free_misc_irq(adapter);
2680 iavf_reset_interrupt_capability(adapter);
2682 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2686 * iavf_watchdog_task - Periodic call-back task
2687 * @work: pointer to work_struct
2689 static void iavf_watchdog_task(struct work_struct *work)
2691 struct iavf_adapter *adapter = container_of(work,
2692 struct iavf_adapter,
2693 watchdog_task.work);
2694 struct iavf_hw *hw = &adapter->hw;
2697 if (!mutex_trylock(&adapter->crit_lock)) {
2698 if (adapter->state == __IAVF_REMOVE)
2701 goto restart_watchdog;
2704 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2705 iavf_change_state(adapter, __IAVF_COMM_FAILED);
2707 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2708 adapter->aq_required = 0;
2709 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2710 mutex_unlock(&adapter->crit_lock);
2711 queue_work(iavf_wq, &adapter->reset_task);
2715 switch (adapter->state) {
2716 case __IAVF_STARTUP:
2717 iavf_startup(adapter);
2718 mutex_unlock(&adapter->crit_lock);
2719 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2720 msecs_to_jiffies(30));
2722 case __IAVF_INIT_VERSION_CHECK:
2723 iavf_init_version_check(adapter);
2724 mutex_unlock(&adapter->crit_lock);
2725 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2726 msecs_to_jiffies(30));
2728 case __IAVF_INIT_GET_RESOURCES:
2729 iavf_init_get_resources(adapter);
2730 mutex_unlock(&adapter->crit_lock);
2731 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2732 msecs_to_jiffies(1));
2734 case __IAVF_INIT_EXTENDED_CAPS:
2735 iavf_init_process_extended_caps(adapter);
2736 mutex_unlock(&adapter->crit_lock);
2737 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2738 msecs_to_jiffies(1));
2740 case __IAVF_INIT_CONFIG_ADAPTER:
2741 iavf_init_config_adapter(adapter);
2742 mutex_unlock(&adapter->crit_lock);
2743 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2744 msecs_to_jiffies(1));
2746 case __IAVF_INIT_FAILED:
2747 if (test_bit(__IAVF_IN_REMOVE_TASK,
2748 &adapter->crit_section)) {
2749 /* Do not update the state and do not reschedule
2750 * watchdog task; iavf_remove should handle this state,
2751 * as it could otherwise loop forever
2753 mutex_unlock(&adapter->crit_lock);
2756 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
2757 dev_err(&adapter->pdev->dev,
2758 "Failed to communicate with PF; waiting before retry\n");
2759 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2760 iavf_shutdown_adminq(hw);
2761 mutex_unlock(&adapter->crit_lock);
2762 queue_delayed_work(iavf_wq,
2763 &adapter->watchdog_task, (5 * HZ));
2766 /* Try again from the failed step */
2767 iavf_change_state(adapter, adapter->last_state);
2768 mutex_unlock(&adapter->crit_lock);
2769 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
2771 case __IAVF_COMM_FAILED:
2772 if (test_bit(__IAVF_IN_REMOVE_TASK,
2773 &adapter->crit_section)) {
2774 /* Set state to __IAVF_INIT_FAILED and perform remove
2775 * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
2776 * doesn't bring the state back to __IAVF_COMM_FAILED.
2778 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2779 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2780 mutex_unlock(&adapter->crit_lock);
2783 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2784 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2785 if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
2786 reg_val == VIRTCHNL_VFR_COMPLETED) {
2787 /* A chance for redemption! */
2788 dev_err(&adapter->pdev->dev,
2789 "Hardware came out of reset. Attempting reinit.\n");
2790 /* When init task contacts the PF and
2791 * gets everything set up again, it'll restart the
2792 * watchdog for us. Down, boy. Sit. Stay. Woof.
2794 iavf_change_state(adapter, __IAVF_STARTUP);
2795 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2797 adapter->aq_required = 0;
2798 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2799 mutex_unlock(&adapter->crit_lock);
2800 queue_delayed_work(iavf_wq,
2801 &adapter->watchdog_task,
2802 msecs_to_jiffies(10));
2804 case __IAVF_RESETTING:
2805 mutex_unlock(&adapter->crit_lock);
2806 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
2809 case __IAVF_DOWN_PENDING:
2810 case __IAVF_TESTING:
2811 case __IAVF_RUNNING:
2812 if (adapter->current_op) {
2813 if (!iavf_asq_done(hw)) {
2814 dev_dbg(&adapter->pdev->dev,
2815 "Admin queue timeout\n");
2816 iavf_send_api_ver(adapter);
2819 int ret = iavf_process_aq_command(adapter);
2821 /* An error will be returned if no commands were
2822 * processed; use this opportunity to update stats
2823 * if the error isn't -EOPNOTSUPP
2825 if (ret && ret != -EOPNOTSUPP &&
2826 adapter->state == __IAVF_RUNNING)
2827 iavf_request_stats(adapter);
2829 if (adapter->state == __IAVF_RUNNING)
2830 iavf_detect_recover_hung(&adapter->vsi);
2834 mutex_unlock(&adapter->crit_lock);
2838 /* check for hw reset */
2839 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2841 adapter->flags |= IAVF_FLAG_RESET_PENDING;
2842 adapter->aq_required = 0;
2843 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2844 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
2845 queue_work(iavf_wq, &adapter->reset_task);
2846 mutex_unlock(&adapter->crit_lock);
2847 queue_delayed_work(iavf_wq,
2848 &adapter->watchdog_task, HZ * 2);
2852 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
2853 mutex_unlock(&adapter->crit_lock);
2855 if (adapter->state >= __IAVF_DOWN)
2856 queue_work(iavf_wq, &adapter->adminq_task);
2857 if (adapter->aq_required)
2858 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2859 msecs_to_jiffies(20));
2861 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
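/* Editor's summary of the rescheduling cadence above (derived from the
 * queue_delayed_work() calls, not normative):
 *
 *	init states:         1-30 ms, tight polling during bring-up
 *	__IAVF_INIT_FAILED:  1 s retry, 5 s after IAVF_AQ_MAX_ERR failures
 *	pending aq_required: 20 ms, to expedite outstanding requests
 *	steady state:        2 s, for stats and hung-queue detection
 */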
2865 * iavf_disable_vf - disable VF
2866 * @adapter: board private structure
2868 * Set communication failed flag and free all resources.
2869 * NOTE: This function is expected to be called with crit_lock being held.
2871 static void iavf_disable_vf(struct iavf_adapter *adapter)
2873 struct iavf_mac_filter *f, *ftmp;
2874 struct iavf_vlan_filter *fv, *fvtmp;
2875 struct iavf_cloud_filter *cf, *cftmp;
2877 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2879 /* We don't use netif_running() because it may be true prior to
2880 * ndo_open() returning, so we can't assume it means all our open
2881 * tasks have finished, since we're not holding the rtnl_lock here.
2883 if (adapter->state == __IAVF_RUNNING) {
2884 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2885 netif_carrier_off(adapter->netdev);
2886 netif_tx_disable(adapter->netdev);
2887 adapter->link_up = false;
2888 iavf_napi_disable_all(adapter);
2889 iavf_irq_disable(adapter);
2890 iavf_free_traffic_irqs(adapter);
2891 iavf_free_all_tx_resources(adapter);
2892 iavf_free_all_rx_resources(adapter);
2895 spin_lock_bh(&adapter->mac_vlan_list_lock);
2897 /* Delete all of the filters */
2898 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2903 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
2904 list_del(&fv->list);
2908 spin_unlock_bh(&adapter->mac_vlan_list_lock);
2910 spin_lock_bh(&adapter->cloud_filter_list_lock);
2911 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
2912 list_del(&cf->list);
2914 adapter->num_cloud_filters--;
2916 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2918 iavf_free_misc_irq(adapter);
2919 iavf_reset_interrupt_capability(adapter);
2920 iavf_free_q_vectors(adapter);
2921 iavf_free_queues(adapter);
2922 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
2923 iavf_shutdown_adminq(&adapter->hw);
2924 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2925 iavf_change_state(adapter, __IAVF_DOWN);
2926 wake_up(&adapter->down_waitqueue);
2927 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
2931 * iavf_reset_task - Call-back task to handle hardware reset
2932 * @work: pointer to work_struct
2934 * During reset we need to shut down and reinitialize the admin queue
2935 * before we can use it to communicate with the PF again. We also clear
2936 * and reinit the rings because that context is lost as well.
2938 static void iavf_reset_task(struct work_struct *work)
2940 struct iavf_adapter *adapter = container_of(work,
2941 struct iavf_adapter,
2943 struct virtchnl_vf_resource *vfres = adapter->vf_res;
2944 struct net_device *netdev = adapter->netdev;
2945 struct iavf_hw *hw = &adapter->hw;
2946 struct iavf_mac_filter *f, *ftmp;
2947 struct iavf_cloud_filter *cf;
2948 enum iavf_status status;
2953 /* Detach interface to avoid subsequent NDO callbacks */
2955 netif_device_detach(netdev);
2958 /* When the device is being removed it doesn't make sense to run the
2959 * reset task; just return in such a case.
2961 if (!mutex_trylock(&adapter->crit_lock)) {
2962 if (adapter->state != __IAVF_REMOVE)
2963 queue_work(iavf_wq, &adapter->reset_task);
2968 while (!mutex_trylock(&adapter->client_lock))
2969 usleep_range(500, 1000);
2970 if (CLIENT_ENABLED(adapter)) {
2971 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
2972 IAVF_FLAG_CLIENT_NEEDS_CLOSE |
2973 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
2974 IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
2975 cancel_delayed_work_sync(&adapter->client_task);
2976 iavf_notify_client_close(&adapter->vsi, true);
2978 iavf_misc_irq_disable(adapter);
2979 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2980 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
2981 /* Restart the AQ here. If we have been reset but didn't
2982 * detect it, or if the PF had to reinit, our AQ will be hosed.
2984 iavf_shutdown_adminq(hw);
2985 iavf_init_adminq(hw);
2986 iavf_request_reset(adapter);
2988 adapter->flags |= IAVF_FLAG_RESET_PENDING;
2990 /* poll until we see the reset actually happen */
2991 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
2992 reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
2993 IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2996 usleep_range(5000, 10000);
2998 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
2999 dev_info(&adapter->pdev->dev, "Never saw reset\n");
3000 goto continue_reset; /* act like the reset happened */
3003 /* wait until the reset is complete and the PF is responding to us */
3004 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
3005 /* sleep first to make sure a minimum wait time is met */
3006 msleep(IAVF_RESET_WAIT_MS);
3008 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
3009 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3010 if (reg_val == VIRTCHNL_VFR_VFACTIVE)
3014 pci_set_master(adapter->pdev);
3015 pci_restore_msi_state(adapter->pdev);
3017 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
3018 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
3020 iavf_disable_vf(adapter);
3021 mutex_unlock(&adapter->client_lock);
3022 mutex_unlock(&adapter->crit_lock);
3023 if (netif_running(netdev)) {
3028 return; /* Do not attempt to reinit. It's dead, Jim. */
3032 /* We don't use netif_running() because it may be true prior to
3033 * ndo_open() returning, so we can't assume it means all our open
3034 * tasks have finished, since we're not holding the rtnl_lock here.
3036 running = adapter->state == __IAVF_RUNNING;
3039 netif_carrier_off(netdev);
3040 netif_tx_stop_all_queues(netdev);
3041 adapter->link_up = false;
3042 iavf_napi_disable_all(adapter);
3044 iavf_irq_disable(adapter);
3046 iavf_change_state(adapter, __IAVF_RESETTING);
3047 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
3049 /* free the Tx/Rx rings and descriptors, might be better to just
3050 * re-use them sometime in the future
3052 iavf_free_all_rx_resources(adapter);
3053 iavf_free_all_tx_resources(adapter);
3055 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
3056 /* kill and reinit the admin queue */
3057 iavf_shutdown_adminq(hw);
3058 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
3059 status = iavf_init_adminq(hw);
3061 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
3065 adapter->aq_required = 0;
3067 if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3068 (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3069 err = iavf_reinit_interrupt_scheme(adapter);
3074 if (RSS_AQ(adapter)) {
3075 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
3077 err = iavf_init_rss(adapter);
3082 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
3083 /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
3084 * sent/received yet, so VLAN_V2_ALLOWED() is not reliable here;
3085 * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
3086 * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
3087 * been successfully sent and negotiated
3089 adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
3090 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
3092 spin_lock_bh(&adapter->mac_vlan_list_lock);
3094 /* Delete filter for the current MAC address; it could have
3095 * been changed by the PF via administratively set MAC.
3096 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
3098 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3099 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
3104 /* re-add all MAC filters */
3105 list_for_each_entry(f, &adapter->mac_filter_list, list) {
3108 spin_unlock_bh(&adapter->mac_vlan_list_lock);
3110 /* check if TCs are running and re-add all cloud filters */
3111 spin_lock_bh(&adapter->cloud_filter_list_lock);
3112 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
3114 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
3118 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3120 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
3121 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3122 iavf_misc_irq_enable(adapter);
3124 bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
3125 bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
3127 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
3129 /* We were running when the reset started, so we need to restore some
3133 /* allocate transmit descriptors */
3134 err = iavf_setup_all_tx_resources(adapter);
3138 /* allocate receive descriptors */
3139 err = iavf_setup_all_rx_resources(adapter);
3143 if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3144 (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3145 err = iavf_request_traffic_irqs(adapter, netdev->name);
3149 adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
3152 iavf_configure(adapter);
3154 /* iavf_up_complete() will switch device back
3157 iavf_up_complete(adapter);
3159 iavf_irq_enable(adapter, true);
3161 iavf_change_state(adapter, __IAVF_DOWN);
3162 wake_up(&adapter->down_waitqueue);
3165 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3167 mutex_unlock(&adapter->client_lock);
3168 mutex_unlock(&adapter->crit_lock);
3173 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3174 iavf_free_traffic_irqs(adapter);
3176 iavf_disable_vf(adapter);
3178 mutex_unlock(&adapter->client_lock);
3179 mutex_unlock(&adapter->crit_lock);
3181 if (netif_running(netdev)) {
3182 /* Close device to ensure that Tx queues will not be started
3183 * during netif_device_attach() at the end of the reset task.
3190 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
3193 netif_device_attach(netdev);
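/* Editor's note: the reset task brackets the whole reinit with
 * netif_device_detach()/netif_device_attach() so that no NDO callbacks
 * (open, close, set_features, ...) can race with the admin queue
 * re-negotiation. A minimal sketch of the pattern:
 *
 *	netif_device_detach(netdev);
 *	...shut down the AQ, renegotiate, rebuild rings...
 *	netif_device_attach(netdev);
 */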
3198 * iavf_adminq_task - worker thread to clean the admin queue
3199 * @work: pointer to work_struct containing our data
3201 static void iavf_adminq_task(struct work_struct *work)
3203 struct iavf_adapter *adapter =
3204 container_of(work, struct iavf_adapter, adminq_task);
3205 struct iavf_hw *hw = &adapter->hw;
3206 struct iavf_arq_event_info event;
3207 enum virtchnl_ops v_op;
3208 enum iavf_status ret, v_ret;
3212 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
3215 if (!mutex_trylock(&adapter->crit_lock)) {
3216 if (adapter->state == __IAVF_REMOVE)
3219 queue_work(iavf_wq, &adapter->adminq_task);
3223 event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
3224 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
3229 ret = iavf_clean_arq_element(hw, &event, &pending);
3230 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
3231 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
3234 break; /* No event to process or error cleaning ARQ */
3236 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
3239 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
3241 mutex_unlock(&adapter->crit_lock);
3243 if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
3244 if (adapter->netdev_registered ||
3245 !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
3246 struct net_device *netdev = adapter->netdev;
3249 netdev_update_features(netdev);
3251 /* Request VLAN offload settings */
3252 if (VLAN_V2_ALLOWED(adapter))
3253 iavf_set_vlan_offload_features
3254 (adapter, 0, netdev->features);
3256 iavf_set_queue_vlan_tag_loc(adapter);
3259 adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
3261 if ((adapter->flags &
3262 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
3263 adapter->state == __IAVF_RESETTING)
3266 /* check for error indications */
3267 val = rd32(hw, hw->aq.arq.len);
3268 if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
3271 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
3272 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
3273 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
3275 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
3276 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
3277 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
3279 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
3280 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
3281 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
3284 wr32(hw, hw->aq.arq.len, val);
3286 val = rd32(hw, hw->aq.asq.len);
3288 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
3289 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
3290 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
3292 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
3293 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
3294 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
3296 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
3297 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
3298 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
3301 wr32(hw, hw->aq.asq.len, val);
3304 kfree(event.msg_buf);
3306 /* re-enable Admin queue interrupt cause */
3307 iavf_misc_irq_enable(adapter);
3311 * iavf_client_task - worker thread to perform client work
3312 * @work: pointer to work_struct containing our data
3314 * This task handles client interactions. Because client calls can be
3315 * reentrant, we can't handle them in the watchdog.
3317 static void iavf_client_task(struct work_struct *work)
3319 struct iavf_adapter *adapter =
3320 container_of(work, struct iavf_adapter, client_task.work);
3322 /* If we can't get the client bit, just give up. We'll be rescheduled
3326 if (!mutex_trylock(&adapter->client_lock))
3329 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
3330 iavf_client_subtask(adapter);
3331 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3334 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
3335 iavf_notify_client_l2_params(&adapter->vsi);
3336 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
3339 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
3340 iavf_notify_client_close(&adapter->vsi, false);
3341 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
3344 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
3345 iavf_notify_client_open(&adapter->vsi);
3346 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
3349 mutex_unlock(&adapter->client_lock);
3353 * iavf_free_all_tx_resources - Free Tx Resources for All Queues
3354 * @adapter: board private structure
3356 * Free all transmit software resources
3358 void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
3362 if (!adapter->tx_rings)
3365 for (i = 0; i < adapter->num_active_queues; i++)
3366 if (adapter->tx_rings[i].desc)
3367 iavf_free_tx_resources(&adapter->tx_rings[i]);
3371 * iavf_setup_all_tx_resources - allocate all queues Tx resources
3372 * @adapter: board private structure
3374 * If this function returns with an error, then it's possible one or
3375 * more of the rings is populated (while the rest are not). It is the
3376 * caller's duty to clean those orphaned rings.
3378 * Return 0 on success, negative on failure
3380 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
3384 for (i = 0; i < adapter->num_active_queues; i++) {
3385 adapter->tx_rings[i].count = adapter->tx_desc_count;
3386 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
3389 dev_err(&adapter->pdev->dev,
3390 "Allocation for Tx Queue %u failed\n", i);
3398 * iavf_setup_all_rx_resources - allocate all queues Rx resources
3399 * @adapter: board private structure
3401 * If this function returns with an error, then it's possible one or
3402 * more of the rings is populated (while the rest are not). It is the
3403 * caller's duty to clean those orphaned rings.
3405 * Return 0 on success, negative on failure
3407 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
3411 for (i = 0; i < adapter->num_active_queues; i++) {
3412 adapter->rx_rings[i].count = adapter->rx_desc_count;
3413 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
3416 dev_err(&adapter->pdev->dev,
3417 "Allocation for Rx Queue %u failed\n", i);
3424 * iavf_free_all_rx_resources - Free Rx Resources for All Queues
3425 * @adapter: board private structure
3427 * Free all receive software resources
3429 void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
3433 if (!adapter->rx_rings)
3436 for (i = 0; i < adapter->num_active_queues; i++)
3437 if (adapter->rx_rings[i].desc)
3438 iavf_free_rx_resources(&adapter->rx_rings[i]);
3442 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
3443 * @adapter: board private structure
3444 * @max_tx_rate: max Tx bw for a tc
3446 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
3449 int speed = 0, ret = 0;
3451 if (ADV_LINK_SUPPORT(adapter)) {
3452 if (adapter->link_speed_mbps < U32_MAX) {
3453 speed = adapter->link_speed_mbps;
3456 dev_err(&adapter->pdev->dev, "Unknown link speed\n");
3461 switch (adapter->link_speed) {
3462 case VIRTCHNL_LINK_SPEED_40GB:
3463 speed = SPEED_40000;
3465 case VIRTCHNL_LINK_SPEED_25GB:
3466 speed = SPEED_25000;
3468 case VIRTCHNL_LINK_SPEED_20GB:
3469 speed = SPEED_20000;
3471 case VIRTCHNL_LINK_SPEED_10GB:
3472 speed = SPEED_10000;
3474 case VIRTCHNL_LINK_SPEED_5GB:
3477 case VIRTCHNL_LINK_SPEED_2_5GB:
3480 case VIRTCHNL_LINK_SPEED_1GB:
3483 case VIRTCHNL_LINK_SPEED_100MB:
3491 if (max_tx_rate > speed) {
3492 dev_err(&adapter->pdev->dev,
3493 "Invalid tx rate specified\n");
3501 * iavf_validate_ch_config - validate queue mapping info
3502 * @adapter: board private structure
3503 * @mqprio_qopt: queue parameters
3505 * This function validates whether the config provided by the user to
3506 * configure queue channels is valid. Returns 0 on a valid config.
3509 static int iavf_validate_ch_config(struct iavf_adapter *adapter,
3510 struct tc_mqprio_qopt_offload *mqprio_qopt)
3512 u64 total_max_rate = 0;
3513 u32 tx_rate_rem = 0;
3518 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
3519 mqprio_qopt->qopt.num_tc < 1)
3522 for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
3523 if (!mqprio_qopt->qopt.count[i] ||
3524 mqprio_qopt->qopt.offset[i] != num_qps)
3526 if (mqprio_qopt->min_rate[i]) {
3527 dev_err(&adapter->pdev->dev,
3528 "Invalid min tx rate (greater than 0) specified for TC%d\n",
3533 /* convert to Mbps */
3534 tx_rate = div_u64(mqprio_qopt->max_rate[i],
3537 if (mqprio_qopt->max_rate[i] &&
3538 tx_rate < IAVF_MBPS_QUANTA) {
3539 dev_err(&adapter->pdev->dev,
3540 "Invalid max tx rate for TC%d, minimum %dMbps\n",
3541 i, IAVF_MBPS_QUANTA);
3545 (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
3547 if (tx_rate_rem != 0) {
3548 dev_err(&adapter->pdev->dev,
3549 "Invalid max tx rate for TC%d, not divisible by %d\n",
3550 i, IAVF_MBPS_QUANTA);
3554 total_max_rate += tx_rate;
3555 num_qps += mqprio_qopt->qopt.count[i];
3557 if (num_qps > adapter->num_active_queues) {
3558 dev_err(&adapter->pdev->dev,
3559 "Cannot support requested number of queues\n");
3563 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
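/* Worked example (editor's, assuming IAVF_MBPS_DIVISOR == 125000 and
 * IAVF_MBPS_QUANTA == 50): mqprio passes max_rate in bytes per second,
 * so a user asking for 100 Mbit/s supplies 12500000; the div_u64() above
 * yields tx_rate = 100 Mbps, which clears both the quanta minimum and
 * the divisibility check (100 % 50 == 0).
 */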
3568 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
3569 * @adapter: board private structure
3571 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
3573 struct iavf_cloud_filter *cf, *cftmp;
3575 spin_lock_bh(&adapter->cloud_filter_list_lock);
3576 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
3578 list_del(&cf->list);
3580 adapter->num_cloud_filters--;
3582 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3586 * __iavf_setup_tc - configure multiple traffic classes
3587 * @netdev: network interface device structure
3588 * @type_data: tc offload data
3590 * This function processes the config information provided by the
3591 * user to configure traffic classes/queue channels and packages the
3592 * information to request the PF to setup traffic classes.
3594 * Returns 0 on success.
3596 static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
3598 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
3599 struct iavf_adapter *adapter = netdev_priv(netdev);
3600 struct virtchnl_vf_resource *vfres = adapter->vf_res;
3601 u8 num_tc = 0, total_qps = 0;
3602 int ret = 0, netdev_tc = 0;
3607 num_tc = mqprio_qopt->qopt.num_tc;
3608 mode = mqprio_qopt->mode;
3610 /* delete queue_channel */
3611 if (!mqprio_qopt->qopt.hw) {
3612 if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
3613 /* reset the tc configuration */
3614 netdev_reset_tc(netdev);
3615 adapter->num_tc = 0;
3616 netif_tx_stop_all_queues(netdev);
3617 netif_tx_disable(netdev);
3618 iavf_del_all_cloud_filters(adapter);
3619 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
3620 total_qps = adapter->orig_num_active_queues;
3627 /* add queue channel */
3628 if (mode == TC_MQPRIO_MODE_CHANNEL) {
3629 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3630 dev_err(&adapter->pdev->dev, "ADq not supported\n");
3633 if (adapter->ch_config.state != __IAVF_TC_INVALID) {
3634 dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
3638 ret = iavf_validate_ch_config(adapter, mqprio_qopt);
3641 /* Return if same TC config is requested */
3642 if (adapter->num_tc == num_tc)
3644 adapter->num_tc = num_tc;
3646 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3648 adapter->ch_config.ch_info[i].count =
3649 mqprio_qopt->qopt.count[i];
3650 adapter->ch_config.ch_info[i].offset =
3651 mqprio_qopt->qopt.offset[i];
3652 total_qps += mqprio_qopt->qopt.count[i];
3653 max_tx_rate = mqprio_qopt->max_rate[i];
3654 /* convert to Mbps */
3655 max_tx_rate = div_u64(max_tx_rate,
3657 adapter->ch_config.ch_info[i].max_tx_rate =
3660 adapter->ch_config.ch_info[i].count = 1;
3661 adapter->ch_config.ch_info[i].offset = 0;
3665 /* Take a snapshot of the original config, such as num_active_queues.
3666 * It is used later when the delete-ADQ flow is exercised, so that
3667 * once the delete-ADQ flow completes, the VF goes back to its
3668 * original queue configuration
3671 adapter->orig_num_active_queues = adapter->num_active_queues;
3673 /* Store queue info based on TC so that the VF gets configured
3674 * with the correct number of queues when it completes ADQ config
3677 adapter->ch_config.total_qps = total_qps;
3679 netif_tx_stop_all_queues(netdev);
3680 netif_tx_disable(netdev);
3681 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
3682 netdev_reset_tc(netdev);
3683 /* Report the tc mapping up the stack */
3684 netdev_set_num_tc(adapter->netdev, num_tc);
3685 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3686 u16 qcount = mqprio_qopt->qopt.count[i];
3687 u16 qoffset = mqprio_qopt->qopt.offset[i];
3690 netdev_set_tc_queue(netdev, netdev_tc++, qcount,
3695 if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
3698 netif_set_real_num_rx_queues(netdev, total_qps);
3699 netif_set_real_num_tx_queues(netdev, total_qps);
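/* Example usage (editor's, assuming a VF netdev "eth0" with 8 queues):
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel
 *
 * lands here with mode == TC_MQPRIO_MODE_CHANNEL and queues
 * IAVF_FLAG_AQ_ENABLE_CHANNELS; deleting the qdisc takes the
 * !mqprio_qopt->qopt.hw path above and disables the channels again.
 */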
3705 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
3706 * @adapter: board private structure
3707 * @f: pointer to struct flow_cls_offload
3708 * @filter: pointer to cloud filter structure
3710 static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
3711 struct flow_cls_offload *f,
3712 struct iavf_cloud_filter *filter)
3714 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3715 struct flow_dissector *dissector = rule->match.dissector;
3716 u16 n_proto_mask = 0;
3717 u16 n_proto_key = 0;
3722 struct virtchnl_filter *vf = &filter->f;
3724 if (dissector->used_keys &
3725 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
3726 BIT(FLOW_DISSECTOR_KEY_BASIC) |
3727 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
3728 BIT(FLOW_DISSECTOR_KEY_VLAN) |
3729 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
3730 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
3731 BIT(FLOW_DISSECTOR_KEY_PORTS) |
3732 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
3733 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
3734 dissector->used_keys);
3738 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
3739 struct flow_match_enc_keyid match;
3741 flow_rule_match_enc_keyid(rule, &match);
3742 if (match.mask->keyid != 0)
3743 field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
3746 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
3747 struct flow_match_basic match;
3749 flow_rule_match_basic(rule, &match);
3750 n_proto_key = ntohs(match.key->n_proto);
3751 n_proto_mask = ntohs(match.mask->n_proto);
3753 if (n_proto_key == ETH_P_ALL) {
3757 n_proto = n_proto_key & n_proto_mask;
3758 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
3760 if (n_proto == ETH_P_IPV6) {
3761 /* specify flow type as TCP IPv6 */
3762 vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
3765 if (match.key->ip_proto != IPPROTO_TCP) {
3766 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
3771 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
3772 struct flow_match_eth_addrs match;
3774 flow_rule_match_eth_addrs(rule, &match);
3776 /* use is_broadcast and is_zero to check for all 0xff or 0 */
3777 if (!is_zero_ether_addr(match.mask->dst)) {
3778 if (is_broadcast_ether_addr(match.mask->dst)) {
3779 field_flags |= IAVF_CLOUD_FIELD_OMAC;
3781 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
3787 if (!is_zero_ether_addr(match.mask->src)) {
3788 if (is_broadcast_ether_addr(match.mask->src)) {
3789 field_flags |= IAVF_CLOUD_FIELD_IMAC;
3791 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
3797 if (!is_zero_ether_addr(match.key->dst))
3798 if (is_valid_ether_addr(match.key->dst) ||
3799 is_multicast_ether_addr(match.key->dst)) {
3800 /* set the mask if a valid dst_mac address */
3801 for (i = 0; i < ETH_ALEN; i++)
3802 vf->mask.tcp_spec.dst_mac[i] |= 0xff;
3803 ether_addr_copy(vf->data.tcp_spec.dst_mac,
3807 if (!is_zero_ether_addr(match.key->src))
3808 if (is_valid_ether_addr(match.key->src) ||
3809 is_multicast_ether_addr(match.key->src)) {
3810 /* set the mask if a valid src_mac address */
3811 for (i = 0; i < ETH_ALEN; i++)
3812 vf->mask.tcp_spec.src_mac[i] |= 0xff;
3813 ether_addr_copy(vf->data.tcp_spec.src_mac,
3818 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
3819 struct flow_match_vlan match;
3821 flow_rule_match_vlan(rule, &match);
3822 if (match.mask->vlan_id) {
3823 if (match.mask->vlan_id == VLAN_VID_MASK) {
3824 field_flags |= IAVF_CLOUD_FIELD_IVLAN;
3826 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
3827 match.mask->vlan_id);
3831 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
3832 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
3835 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
3836 struct flow_match_control match;
3838 flow_rule_match_control(rule, &match);
3839 addr_type = match.key->addr_type;
3842 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
3843 struct flow_match_ipv4_addrs match;
3845 flow_rule_match_ipv4_addrs(rule, &match);
3846 if (match.mask->dst) {
3847 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
3848 field_flags |= IAVF_CLOUD_FIELD_IIP;
3850 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
3851 be32_to_cpu(match.mask->dst));
3856 if (match.mask->src) {
3857 if (match.mask->src == cpu_to_be32(0xffffffff)) {
3858 field_flags |= IAVF_CLOUD_FIELD_IIP;
3860 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
3861 be32_to_cpu(match.mask->src));
3866 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
3867 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
3870 if (match.key->dst) {
3871 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
3872 vf->data.tcp_spec.dst_ip[0] = match.key->dst;
3874 if (match.key->src) {
3875 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
3876 vf->data.tcp_spec.src_ip[0] = match.key->src;
3880 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
3881 struct flow_match_ipv6_addrs match;
3883 flow_rule_match_ipv6_addrs(rule, &match);
3885 /* validate mask, make sure it is not IPV6_ADDR_ANY */
3886 if (ipv6_addr_any(&match.mask->dst)) {
3887 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
3892 /* src and dest IPv6 address should not be LOOPBACK
3893 * (0:0:0:0:0:0:0:1) which can be represented as ::1
3895 if (ipv6_addr_loopback(&match.key->dst) ||
3896 ipv6_addr_loopback(&match.key->src)) {
3897 dev_err(&adapter->pdev->dev,
3898 "ipv6 addr should not be loopback\n");
3901 if (!ipv6_addr_any(&match.mask->dst) ||
3902 !ipv6_addr_any(&match.mask->src))
3903 field_flags |= IAVF_CLOUD_FIELD_IIP;
3905 for (i = 0; i < 4; i++)
3906 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
3907 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
3908 sizeof(vf->data.tcp_spec.dst_ip));
3909 for (i = 0; i < 4; i++)
3910 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
3911 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
3912 sizeof(vf->data.tcp_spec.src_ip));
3914 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
3915 struct flow_match_ports match;
3917 flow_rule_match_ports(rule, &match);
3918 if (match.mask->src) {
3919 if (match.mask->src == cpu_to_be16(0xffff)) {
3920 field_flags |= IAVF_CLOUD_FIELD_IIP;
3922 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
3923 be16_to_cpu(match.mask->src));
3928 if (match.mask->dst) {
3929 if (match.mask->dst == cpu_to_be16(0xffff)) {
3930 field_flags |= IAVF_CLOUD_FIELD_IIP;
3932 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
3933 be16_to_cpu(match.mask->dst));
3937 if (match.key->dst) {
3938 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
3939 vf->data.tcp_spec.dst_port = match.key->dst;
3942 if (match.key->src) {
3943 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
3944 vf->data.tcp_spec.src_port = match.key->src;
3947 vf->field_flags = field_flags;
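/* Example usage (editor's): a flower rule this parser accepts, steering
 * TCP traffic to a traffic class, might look like:
 *
 *	tc filter add dev eth0 protocol ip ingress flower \
 *		ip_proto tcp dst_ip 192.168.1.10 dst_port 80 \
 *		skip_sw hw_tc 1
 *
 * Keys outside the dissector set checked above, or masks that are
 * neither all-ones nor zero, are rejected before reaching the PF.
 */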
3953 * iavf_handle_tclass - Forward to a traffic class on the device
3954 * @adapter: board private structure
3955 * @tc: traffic class index on the device
3956 * @filter: pointer to cloud filter structure
3958 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
3959 struct iavf_cloud_filter *filter)
3963 if (tc < adapter->num_tc) {
3964 if (!filter->f.data.tcp_spec.dst_port) {
3965 dev_err(&adapter->pdev->dev,
3966 "Specify destination port to redirect to traffic class other than TC0\n");
3970 /* redirect to a traffic class on the same device */
3971 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
3972 filter->f.action_meta = tc;
3977 * iavf_find_cf - Find the cloud filter in the list
3978 * @adapter: Board private structure
3979 * @cookie: filter specific cookie
3981 * Returns ptr to the filter object or NULL. Must be called while holding the
3982 * cloud_filter_list_lock.
3984 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
3985 unsigned long *cookie)
3987 struct iavf_cloud_filter *filter = NULL;
3992 list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
3993 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
4000 * iavf_configure_clsflower - Add tc flower filters
4001 * @adapter: board private structure
4002 * @cls_flower: Pointer to struct flow_cls_offload
4004 static int iavf_configure_clsflower(struct iavf_adapter *adapter,
4005 struct flow_cls_offload *cls_flower)
4007 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
4008 struct iavf_cloud_filter *filter = NULL;
4009 int err = -EINVAL, count = 50;
4012 dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
4016 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
4020 while (!mutex_trylock(&adapter->crit_lock)) {
4028 filter->cookie = cls_flower->cookie;
4030 /* bail out here if filter already exists */
4031 spin_lock_bh(&adapter->cloud_filter_list_lock);
4032 if (iavf_find_cf(adapter, &cls_flower->cookie)) {
4033 dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n");
4037 spin_unlock_bh(&adapter->cloud_filter_list_lock);
4039 /* set the mask to all zeroes to begin with */
4040 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
4041 /* start out with flow type and eth type IPv4 */
4042 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
4043 err = iavf_parse_cls_flower(adapter, cls_flower, filter);
4047 err = iavf_handle_tclass(adapter, tc, filter);
4051 /* add filter to the list */
4052 spin_lock_bh(&adapter->cloud_filter_list_lock);
4053 list_add_tail(&filter->list, &adapter->cloud_filter_list);
4054 adapter->num_cloud_filters++;
4056 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
4058 spin_unlock_bh(&adapter->cloud_filter_list_lock);
4063 mutex_unlock(&adapter->crit_lock);
4068 * iavf_delete_clsflower - Remove tc flower filters
4069 * @adapter: board private structure
4070 * @cls_flower: Pointer to struct flow_cls_offload
4072 static int iavf_delete_clsflower(struct iavf_adapter *adapter,
4073 struct flow_cls_offload *cls_flower)
4075 struct iavf_cloud_filter *filter = NULL;
4078 spin_lock_bh(&adapter->cloud_filter_list_lock);
4079 filter = iavf_find_cf(adapter, &cls_flower->cookie);
4082 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
4086 spin_unlock_bh(&adapter->cloud_filter_list_lock);
4092 * iavf_setup_tc_cls_flower - flower classifier offloads
4093 * @adapter: board private structure
4094 * @cls_flower: pointer to flow_cls_offload struct with flow info
4096 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
4097 struct flow_cls_offload *cls_flower)
4099 switch (cls_flower->command) {
4100 case FLOW_CLS_REPLACE:
4101 return iavf_configure_clsflower(adapter, cls_flower);
4102 case FLOW_CLS_DESTROY:
4103 return iavf_delete_clsflower(adapter, cls_flower);
4104 case FLOW_CLS_STATS:
4112 * iavf_setup_tc_block_cb - block callback for tc
4113 * @type: type of offload
4114 * @type_data: offload data
4117 * This function is the block callback for traffic classes
4119 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4122 struct iavf_adapter *adapter = cb_priv;
4124 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
4128 case TC_SETUP_CLSFLOWER:
4129 return iavf_setup_tc_cls_flower(cb_priv, type_data);
4135 static LIST_HEAD(iavf_block_cb_list);
4138 * iavf_setup_tc - configure multiple traffic classes
4139 * @netdev: network interface device structure
4140 * @type: type of offload
4141 * @type_data: tc offload data
4143 * This function is the callback to ndo_setup_tc in the netdev_ops.
4146 * Returns 0 on success
4148 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
4151 struct iavf_adapter *adapter = netdev_priv(netdev);
4154 case TC_SETUP_QDISC_MQPRIO:
4155 return __iavf_setup_tc(netdev, type_data);
4156 case TC_SETUP_BLOCK:
4157 return flow_block_cb_setup_simple(type_data,
4158 &iavf_block_cb_list,
4159 iavf_setup_tc_block_cb,
4160 adapter, adapter, true);
4167 * iavf_open - Called when a network interface is made active
4168 * @netdev: network interface device structure
4170 * Returns 0 on success, negative value on failure
4172 * The open entry point is called when a network interface is made
4173 * active by the system (IFF_UP). At this point all resources needed
4174 * for transmit and receive operations are allocated, the interrupt
4175 * handler is registered with the OS, the watchdog is started,
4176 * and the stack is notified that the interface is ready.
4178 static int iavf_open(struct net_device *netdev)
4180 struct iavf_adapter *adapter = netdev_priv(netdev);
4183 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
4184 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
4188 while (!mutex_trylock(&adapter->crit_lock)) {
4189 /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
4190 * is already taken and iavf_open is called from an upper
4191 * device's notifier reacting on NETDEV_REGISTER event.
4192 * We have to leave here to avoid a deadlock.
4194 if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
4197 usleep_range(500, 1000);
4200 if (adapter->state != __IAVF_DOWN) {
4205 if (adapter->state == __IAVF_RUNNING &&
4206 !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
4207 dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
4212 /* allocate transmit descriptors */
4213 err = iavf_setup_all_tx_resources(adapter);
4217 /* allocate receive descriptors */
4218 err = iavf_setup_all_rx_resources(adapter);
4222 /* clear any pending interrupts, may auto mask */
4223 err = iavf_request_traffic_irqs(adapter, netdev->name);
4227 spin_lock_bh(&adapter->mac_vlan_list_lock);
4229 iavf_add_filter(adapter, adapter->hw.mac.addr);
4231 spin_unlock_bh(&adapter->mac_vlan_list_lock);
4233 /* Restore VLAN filters that were removed with IFF_DOWN */
4234 iavf_restore_filters(adapter);
4236 iavf_configure(adapter);
4238 iavf_up_complete(adapter);
4240 iavf_irq_enable(adapter, true);
4242 mutex_unlock(&adapter->crit_lock);
4248 iavf_free_traffic_irqs(adapter);
4250 iavf_free_all_rx_resources(adapter);
4252 iavf_free_all_tx_resources(adapter);
4254 mutex_unlock(&adapter->crit_lock);
/**
 * iavf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
 * are freed, along with all transmit and receive resources.
 **/
static int iavf_close(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u64 aq_to_restore;
	int status;

	mutex_lock(&adapter->crit_lock);

	if (adapter->state <= __IAVF_DOWN_PENDING) {
		mutex_unlock(&adapter->crit_lock);
		return 0;
	}

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
	/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
	 * IAVF_FLAG_AQ_DISABLE_QUEUES because in such case there is rtnl
	 * deadlock with adminq_task() until iavf_close timeouts. We must send
	 * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to make
	 * disable queues possible for vf. Give only necessary flags to
	 * iavf_down and save other to set them right before iavf_close()
	 * returns, when IAVF_FLAG_AQ_DISABLE_QUEUES will be already sent and
	 * iavf will be in DOWN state.
	 */
	aq_to_restore = adapter->aq_required;
	adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;

	/* Remove flags which we do not want to send after close or we want to
	 * send before disable queues.
	 */
	aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG |
			   IAVF_FLAG_AQ_ENABLE_QUEUES |
			   IAVF_FLAG_AQ_CONFIGURE_QUEUES |
			   IAVF_FLAG_AQ_ADD_VLAN_FILTER |
			   IAVF_FLAG_AQ_ADD_MAC_FILTER |
			   IAVF_FLAG_AQ_ADD_CLOUD_FILTER |
			   IAVF_FLAG_AQ_ADD_FDIR_FILTER |
			   IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);

	iavf_down(adapter);
	iavf_change_state(adapter, __IAVF_DOWN_PENDING);
	iavf_free_traffic_irqs(adapter);

	mutex_unlock(&adapter->crit_lock);

	/* We explicitly don't free resources here because the hardware is
	 * still active and can DMA into memory. Resources are cleared in
	 * iavf_virtchnl_completion() after we get confirmation from the PF
	 * driver that the rings have been stopped.
	 *
	 * Also, we wait for state to transition to __IAVF_DOWN before
	 * returning. State change occurs in iavf_virtchnl_completion() after
	 * VF resources are released (which occurs after PF driver processes and
	 * responds to admin queue commands).
	 */
	status = wait_event_timeout(adapter->down_waitqueue,
				    adapter->state == __IAVF_DOWN,
				    msecs_to_jiffies(500));
	if (!status)
		netdev_warn(netdev, "Device resources not yet released\n");

	mutex_lock(&adapter->crit_lock);
	adapter->aq_required |= aq_to_restore;
	mutex_unlock(&adapter->crit_lock);
	return 0;
}
/**
 * iavf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "changing MTU from %d to %d\n",
		   netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (CLIENT_ENABLED(adapter)) {
		iavf_notify_client_l2_params(&adapter->vsi);
		adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
	}

	if (netif_running(netdev)) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		queue_work(iavf_wq, &adapter->reset_task);
	}

	return 0;
}
#define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
					 NETIF_F_HW_VLAN_CTAG_TX | \
					 NETIF_F_HW_VLAN_STAG_RX | \
					 NETIF_F_HW_VLAN_STAG_TX)
/**
 * iavf_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 * Note: expects to be called while under rtnl_lock()
 **/
static int iavf_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* trigger update on any VLAN feature change */
	if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
	    (features & NETIF_VLAN_OFFLOAD_FEATURES))
		iavf_set_vlan_offload_features(adapter, netdev->features,
					       features);

	return 0;
}
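/* For illustration only: iavf_set_features() is typically reached through
 * ethtool feature toggles, e.g. (device name is a placeholder):
 *
 *   ethtool -K <vf> rx-vlan-offload off
 *
 * The XOR against NETIF_VLAN_OFFLOAD_FEATURES above means a virtchnl
 * update is sent only when one of the four VLAN offload bits actually
 * changes state, not on every ndo_set_features call.
 */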
/**
 * iavf_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/
static netdev_features_t iavf_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

	/* MACLEN can support at most 63 words */
	len = skb_network_header(skb) - skb->data;
	if (len & ~(63 * 2))
		goto out_err;

	/* IPLEN and EIPLEN can support at most 127 dwords */
	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(127 * 4))
		goto out_err;

	if (skb->encapsulation) {
		/* L4TUNLEN can support 127 words */
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(127 * 2))
			goto out_err;

		/* IPLEN can support at most 127 dwords */
		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(127 * 4))
			goto out_err;
	}

	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */

	return features;
out_err:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
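/* A worked example of the masking trick used above, for reference: MACLEN
 * is counted in 2-byte words with a maximum of 63, so the L2 header may be
 * at most 63 * 2 = 126 bytes and must be even. "len & ~(63 * 2)" is
 * "len & ~126", which is nonzero exactly when len has its low bit set (odd
 * length) or any bit above bit 6 set (len >= 128); either way the hardware
 * cannot describe the header, so checksum and GSO offload are dropped for
 * the frame and the stack falls back to software.
 */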
/**
 * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off
 * @adapter: board private structure
 *
 * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
 * were negotiated determine the VLAN features that can be toggled on and off.
 **/
static netdev_features_t
iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
{
	netdev_features_t hw_features = 0;

	if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
		return hw_features;

	/* Enable VLAN features if supported */
	if (VLAN_ALLOWED(adapter)) {
		hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX);
	} else if (VLAN_V2_ALLOWED(adapter)) {
		struct virtchnl_vlan_caps *vlan_v2_caps =
			&adapter->vlan_v2_caps;
		struct virtchnl_vlan_supported_caps *stripping_support =
			&vlan_v2_caps->offloads.stripping_support;
		struct virtchnl_vlan_supported_caps *insertion_support =
			&vlan_v2_caps->offloads.insertion_support;

		if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
		    stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
			if (stripping_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100)
				hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
			if (stripping_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_88A8)
				hw_features |= NETIF_F_HW_VLAN_STAG_RX;
		} else if (stripping_support->inner !=
			   VIRTCHNL_VLAN_UNSUPPORTED &&
			   stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
			if (stripping_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100)
				hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
		}

		if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
		    insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
			if (insertion_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100)
				hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
			if (insertion_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_88A8)
				hw_features |= NETIF_F_HW_VLAN_STAG_TX;
		} else if (insertion_support->inner &&
			   insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
			if (insertion_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100)
				hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
		}
	}

	return hw_features;
}
/**
 * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
 * @adapter: board private structure
 *
 * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
 * were negotiated determine the VLAN features that are enabled by default.
 **/
static netdev_features_t
iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
{
	netdev_features_t features = 0;

	if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
		return features;

	if (VLAN_ALLOWED(adapter)) {
		features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
	} else if (VLAN_V2_ALLOWED(adapter)) {
		struct virtchnl_vlan_caps *vlan_v2_caps =
			&adapter->vlan_v2_caps;
		struct virtchnl_vlan_supported_caps *filtering_support =
			&vlan_v2_caps->filtering.filtering_support;
		struct virtchnl_vlan_supported_caps *stripping_support =
			&vlan_v2_caps->offloads.stripping_support;
		struct virtchnl_vlan_supported_caps *insertion_support =
			&vlan_v2_caps->offloads.insertion_support;
		u32 ethertype_init;

		/* give priority to outer stripping and don't support both outer
		 * and inner stripping
		 */
		ethertype_init = vlan_v2_caps->offloads.ethertype_init;
		if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
			if (stripping_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_RX;
			else if (stripping_support->outer &
				 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
				 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
				features |= NETIF_F_HW_VLAN_STAG_RX;
		} else if (stripping_support->inner !=
			   VIRTCHNL_VLAN_UNSUPPORTED) {
			if (stripping_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_RX;
		}

		/* give priority to outer insertion and don't support both outer
		 * and inner insertion
		 */
		if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
			if (insertion_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_TX;
			else if (insertion_support->outer &
				 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
				 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
				features |= NETIF_F_HW_VLAN_STAG_TX;
		} else if (insertion_support->inner !=
			   VIRTCHNL_VLAN_UNSUPPORTED) {
			if (insertion_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_TX;
		}

		/* give priority to outer filtering and don't bother if both
		 * outer and inner filtering are enabled
		 */
		ethertype_init = vlan_v2_caps->filtering.ethertype_init;
		if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
			if (filtering_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
			if (filtering_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
				features |= NETIF_F_HW_VLAN_STAG_FILTER;
		} else if (filtering_support->inner !=
			   VIRTCHNL_VLAN_UNSUPPORTED) {
			if (filtering_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
			if (filtering_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
				features |= NETIF_F_HW_VLAN_STAG_FILTER;
		}
	}

	return features;
}
#define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
	(!(((requested) & (feature_bit)) && \
	   !((allowed) & (feature_bit))))
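/* Reading the macro above: a requested feature bit is "allowed" unless it
 * is being requested while absent from the allowed mask. For example
 * (values illustrative), with requested = CTAG_TX | CTAG_RX and
 * allowed = CTAG_RX, the macro evaluates false for CTAG_TX and true for
 * CTAG_RX, so the caller below clears only CTAG_TX from the request.
 */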
/**
 * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
 * @adapter: board private structure
 * @requested_features: stack requested NETDEV features
 **/
static netdev_features_t
iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
			      netdev_features_t requested_features)
{
	netdev_features_t allowed_features;

	allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
		iavf_get_netdev_vlan_features(adapter);

	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
					      allowed_features,
					      NETIF_F_HW_VLAN_CTAG_TX))
		requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
					      allowed_features,
					      NETIF_F_HW_VLAN_CTAG_RX))
		requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
					      allowed_features,
					      NETIF_F_HW_VLAN_STAG_TX))
		requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
					      allowed_features,
					      NETIF_F_HW_VLAN_STAG_RX))
		requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;

	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
					      allowed_features,
					      NETIF_F_HW_VLAN_CTAG_FILTER))
		requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;

	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
					      allowed_features,
					      NETIF_F_HW_VLAN_STAG_FILTER))
		requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;

	if ((requested_features &
	     (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
	    (requested_features &
	     (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
	    adapter->vlan_v2_caps.offloads.ethertype_match ==
	    VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
		netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
		requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
					NETIF_F_HW_VLAN_STAG_TX);
	}

	return requested_features;
}
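/* For illustration only: when the PF reports
 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION, a request that enables
 * both CTAG and STAG offloads at once, e.g. something along the lines of
 * "ethtool -K <vf> rx-vlan-offload on tx-vlan-stag-hw-insert on" (device
 * name is a placeholder), cannot be honored in full. The fixup above keeps
 * the CTAG bits, strips the STAG ones, and the netdev_warn() explains why.
 */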
/**
 * iavf_fix_features - fix up the netdev feature bits
 * @netdev: our net device
 * @features: desired feature bits
 *
 * Returns fixed-up features bits
 **/
static netdev_features_t iavf_fix_features(struct net_device *netdev,
					   netdev_features_t features)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	return iavf_fix_netdev_vlan_features(adapter, features);
}
static const struct net_device_ops iavf_netdev_ops = {
	.ndo_open		= iavf_open,
	.ndo_stop		= iavf_close,
	.ndo_start_xmit		= iavf_xmit_frame,
	.ndo_set_rx_mode	= iavf_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= iavf_set_mac,
	.ndo_change_mtu		= iavf_change_mtu,
	.ndo_tx_timeout		= iavf_tx_timeout,
	.ndo_vlan_rx_add_vid	= iavf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= iavf_vlan_rx_kill_vid,
	.ndo_features_check	= iavf_features_check,
	.ndo_fix_features	= iavf_fix_features,
	.ndo_set_features	= iavf_set_features,
	.ndo_setup_tc		= iavf_setup_tc,
};
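/* For orientation: these callbacks are the driver's entry points from the
 * network stack. For example, "ip link set <vf> up" reaches ndo_open and
 * hence iavf_open(), "ip link set <vf> mtu 9000" reaches ndo_change_mtu,
 * and an "ethtool -K" request is first filtered through ndo_fix_features
 * before ndo_set_features applies the surviving bits.
 */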
/**
 * iavf_check_reset_complete - check that VF reset is complete
 * @hw: pointer to hw struct
 *
 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
 **/
static int iavf_check_reset_complete(struct iavf_hw *hw)
{
	u32 rstat;
	int i;

	for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
		rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
			IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
		    (rstat == VIRTCHNL_VFR_COMPLETED))
			return 0;
		usleep_range(10, 20);
	}
	return -EBUSY;
}
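/* Rough poll budget for the loop above, assuming the driver's usual
 * IAVF_RESET_WAIT_COMPLETE_COUNT of 2000 iterations (defined in iavf.h):
 * at 10-20 usec per usleep_range() cycle the register is polled for
 * roughly 20-40 msec in total before giving up with -EBUSY.
 */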
/**
 * iavf_process_config - Process the config information we got from the PF
 * @adapter: board private structure
 *
 * Verify that we have a valid config struct, and set up our netdev features
 * and our VSI struct.
 **/
int iavf_process_config(struct iavf_adapter *adapter)
{
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	netdev_features_t hw_vlan_features, vlan_features;
	struct net_device *netdev = adapter->netdev;
	netdev_features_t hw_enc_features;
	netdev_features_t hw_features;

	hw_enc_features = NETIF_F_SG			|
			  NETIF_F_IP_CSUM		|
			  NETIF_F_IPV6_CSUM		|
			  NETIF_F_HIGHDMA		|
			  NETIF_F_SOFT_FEATURES		|
			  NETIF_F_TSO			|
			  NETIF_F_TSO_ECN		|
			  NETIF_F_TSO6			|
			  NETIF_F_SCTP_CRC		|
			  NETIF_F_RXHASH		|
			  NETIF_F_RXCSUM		|
			  0;

	/* advertise to stack only if offloads for encapsulated packets are
	 * supported
	 */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
		hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL	|
				   NETIF_F_GSO_GRE		|
				   NETIF_F_GSO_GRE_CSUM		|
				   NETIF_F_GSO_IPXIP4		|
				   NETIF_F_GSO_IPXIP6		|
				   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
				   NETIF_F_GSO_PARTIAL		|
				   0;

		if (!(vfres->vf_cap_flags &
		      VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
			netdev->gso_partial_features |=
				NETIF_F_GSO_UDP_TUNNEL_CSUM;

		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
		netdev->hw_enc_features |= hw_enc_features;
	}
	/* record features VLANs can make use of */
	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;

	/* Write features and hw_features separately to avoid polluting
	 * them with, or dropping, features that are set when we registered.
	 */
	hw_features = hw_enc_features;

	/* get HW VLAN features that can be toggled */
	hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);

	/* Enable cloud filter if ADQ is supported */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
		hw_features |= NETIF_F_HW_TC;
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
		hw_features |= NETIF_F_GSO_UDP_L4;

	netdev->hw_features |= hw_features | hw_vlan_features;
	vlan_features = iavf_get_netdev_vlan_features(adapter);

	netdev->features |= hw_features | vlan_features;

	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Do not turn on offloads when they are requested to be turned off.
	 * TSO needs minimum 576 bytes to work correctly.
	 */
	if (netdev->wanted_features) {
		if (!(netdev->wanted_features & NETIF_F_TSO) ||
		    netdev->mtu < 576)
			netdev->features &= ~NETIF_F_TSO;
		if (!(netdev->wanted_features & NETIF_F_TSO6) ||
		    netdev->mtu < 576)
			netdev->features &= ~NETIF_F_TSO6;
		if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
			netdev->features &= ~NETIF_F_TSO_ECN;
		if (!(netdev->wanted_features & NETIF_F_GRO))
			netdev->features &= ~NETIF_F_GRO;
		if (!(netdev->wanted_features & NETIF_F_GSO))
			netdev->features &= ~NETIF_F_GSO;
	}

	return 0;
}
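/* For illustration only: the negotiated result is what "ethtool -k <vf>"
 * reports. For example, a VF whose PF did not grant VIRTCHNL_VF_OFFLOAD_ENCAP
 * would be expected to show tx-udp_tnl-segmentation: off [fixed], while an
 * ADQ-capable VF gains hw-tc-offload as a toggleable feature.
 */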
/**
 * iavf_shutdown - Shutdown the device in preparation for a reboot
 * @pdev: pci device structure
 **/
static void iavf_shutdown(struct pci_dev *pdev)
{
	struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	if (netif_running(netdev))
		iavf_close(netdev);

	if (iavf_lock_timeout(&adapter->crit_lock, 5000))
		dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __func__);
	/* Prevent the watchdog from running. */
	iavf_change_state(adapter, __IAVF_REMOVE);
	adapter->aq_required = 0;
	mutex_unlock(&adapter->crit_lock);

#ifdef CONFIG_PM
	pci_save_state(pdev);

#endif
	pci_disable_device(pdev);
}
/**
 * iavf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in iavf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * iavf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct iavf_adapter *adapter = NULL;
	struct iavf_hw *hw = NULL;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"DMA configuration failed: 0x%x\n", err);
		goto err_dma;
	}

	err = pci_request_regions(pdev, iavf_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
				   IAVF_MAX_REQ_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;

	hw = &adapter->hw;
	hw->back = adapter;

	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
	iavf_change_state(adapter, __IAVF_STARTUP);

	/* Call save state here because it relies on the adapter struct. */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&adapter->crit_lock);
	mutex_init(&adapter->client_lock);
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	spin_lock_init(&adapter->mac_vlan_list_lock);
	spin_lock_init(&adapter->cloud_filter_list_lock);
	spin_lock_init(&adapter->fdir_fltr_lock);
	spin_lock_init(&adapter->adv_rss_lock);

	INIT_LIST_HEAD(&adapter->mac_filter_list);
	INIT_LIST_HEAD(&adapter->vlan_filter_list);
	INIT_LIST_HEAD(&adapter->cloud_filter_list);
	INIT_LIST_HEAD(&adapter->fdir_list_head);
	INIT_LIST_HEAD(&adapter->adv_rss_list_head);

	INIT_WORK(&adapter->reset_task, iavf_reset_task);
	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
	/* Stagger the first watchdog run of neighboring VFs (devfn & 0x07
	 * selects one of eight functions) by 0-35 msec so they do not all
	 * hit the PF's admin queue at the same time.
	 */
	queue_delayed_work(iavf_wq, &adapter->watchdog_task,
			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));

	/* Setup the wait queue for indicating transition to down status */
	init_waitqueue_head(&adapter->down_waitqueue);

	/* Setup the wait queue for indicating virtchannel events */
	init_waitqueue_head(&adapter->vc_waitqueue);

	return 0;

err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * iavf_suspend - Power management suspend routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
static int __maybe_unused iavf_suspend(struct device *dev_d)
{
	struct net_device *netdev = dev_get_drvdata(dev_d);
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	while (!mutex_trylock(&adapter->crit_lock))
		usleep_range(500, 1000);

	if (netif_running(netdev)) {
		rtnl_lock();
		iavf_down(adapter);
		rtnl_unlock();
	}
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);

	mutex_unlock(&adapter->crit_lock);

	return 0;
}
/**
 * iavf_resume - Power management resume routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is resumed from sleep/suspend.
 **/
static int __maybe_unused iavf_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct iavf_adapter *adapter;
	u32 err;

	adapter = iavf_pdev_to_adapter(pdev);

	pci_set_master(pdev);

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	if (err) {
		rtnl_unlock();
		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
		return err;
	}
	err = iavf_request_misc_irq(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
		return err;
	}

	queue_work(iavf_wq, &adapter->reset_task);

	netif_device_attach(adapter->netdev);

	return err;
}
/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void iavf_remove(struct pci_dev *pdev)
{
	struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
	struct net_device *netdev = adapter->netdev;
	struct iavf_fdir_fltr *fdir, *fdirtmp;
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_adv_rss *rss, *rsstmp;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_cloud_filter *cf, *cftmp;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	/* When reboot/shutdown is in progress no need to do anything
	 * as the adapter is already REMOVE state that was set during
	 * iavf_shutdown() callback.
	 */
	if (adapter->state == __IAVF_REMOVE)
		return;

	set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
	/* Wait until port initialization is complete.
	 * There are flows where register/unregister netdev may race.
	 */
	while (1) {
		mutex_lock(&adapter->crit_lock);
		if (adapter->state == __IAVF_RUNNING ||
		    adapter->state == __IAVF_DOWN ||
		    adapter->state == __IAVF_INIT_FAILED) {
			mutex_unlock(&adapter->crit_lock);
			break;
		}

		mutex_unlock(&adapter->crit_lock);
		usleep_range(500, 1000);
	}
	cancel_delayed_work_sync(&adapter->watchdog_task);

	if (adapter->netdev_registered) {
		rtnl_lock();
		unregister_netdevice(netdev);
		adapter->netdev_registered = false;
		rtnl_unlock();
	}
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_del_device(adapter);
		if (err)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 err);
	}

	mutex_lock(&adapter->crit_lock);
	dev_info(&adapter->pdev->dev, "Remove device\n");
	iavf_change_state(adapter, __IAVF_REMOVE);

	iavf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
	if (!iavf_asq_done(hw)) {
		iavf_request_reset(adapter);
		msleep(50);
	}

	iavf_misc_irq_disable(adapter);
	/* Shut down all the garbage mashers on the detention level */
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->adminq_task);
	cancel_delayed_work_sync(&adapter->client_task);

	adapter->aq_required = 0;
	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;

	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_free_misc_irq(adapter);

	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);

	iavf_free_rss(adapter);

	if (hw->aq.asq.count)
		iavf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);
	mutex_destroy(&adapter->client_lock);
	mutex_unlock(&adapter->crit_lock);
	mutex_destroy(&adapter->crit_lock);

	iounmap(hw->hw_addr);
	pci_release_regions(pdev);
	iavf_free_queues(adapter);
	kfree(adapter->vf_res);
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list) {
		list_del(&vlf->list);
		kfree(vlf);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
		list_del(&fdir->list);
		kfree(fdir);
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
				 list) {
		list_del(&rss->list);
		kfree(rss);
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);

static struct pci_driver iavf_driver = {
	.name      = iavf_driver_name,
	.id_table  = iavf_pci_tbl,
	.probe     = iavf_probe,
	.remove    = iavf_remove,
	.driver.pm = &iavf_pm_ops,
	.shutdown  = iavf_shutdown,
};
/**
 * iavf_init_module - Driver Registration Routine
 *
 * iavf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init iavf_init_module(void)
{
	pr_info("iavf: %s\n", iavf_driver_string);

	pr_info("%s\n", iavf_copyright);

	iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
				  iavf_driver_name);
	if (!iavf_wq) {
		pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
		return -ENOMEM;
	}
	return pci_register_driver(&iavf_driver);
}

module_init(iavf_init_module);
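/* A note on the workqueue created in iavf_init_module(): on modern kernels
 * alloc_workqueue() with WQ_UNBOUND and max_active = 1 is implicitly
 * promoted to an ordered queue, so the watchdog, reset, and adminq work
 * items queued on iavf_wq execute one at a time rather than concurrently,
 * and WQ_MEM_RECLAIM provides a rescuer thread so the queue keeps making
 * forward progress under memory pressure.
 */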
/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * iavf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit iavf_exit_module(void)
{
	pci_unregister_driver(&iavf_driver);
	destroy_workqueue(iavf_wq);
}

module_exit(iavf_exit_module);