iavf: Do not restart Tx queues after reset task failure
[linux-2.6-microblaze.git] / drivers / net / ethernet / intel / iavf / iavf_main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4 #include "iavf.h"
5 #include "iavf_prototype.h"
6 #include "iavf_client.h"
7 /* All iavf tracepoints are defined by the include below, which must
8  * be included exactly once across the whole kernel with
9  * CREATE_TRACE_POINTS defined
10  */
11 #define CREATE_TRACE_POINTS
12 #include "iavf_trace.h"
13
14 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
15 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
16 static int iavf_close(struct net_device *netdev);
17 static void iavf_init_get_resources(struct iavf_adapter *adapter);
18 static int iavf_check_reset_complete(struct iavf_hw *hw);
19
20 char iavf_driver_name[] = "iavf";
21 static const char iavf_driver_string[] =
22         "Intel(R) Ethernet Adaptive Virtual Function Network Driver";
23
24 static const char iavf_copyright[] =
25         "Copyright (c) 2013 - 2018 Intel Corporation.";
26
27 /* iavf_pci_tbl - PCI Device ID Table
28  *
29  * Wildcard entries (PCI_ANY_ID) should come last
30  * Last entry must be all 0s
31  *
32  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
33  *   Class, Class Mask, private data (not used) }
34  */
35 static const struct pci_device_id iavf_pci_tbl[] = {
36         {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
37         {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
38         {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
39         {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
40         /* required last entry */
41         {0, }
42 };
43
44 MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
45
46 MODULE_ALIAS("i40evf");
47 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
48 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
49 MODULE_LICENSE("GPL v2");
50
51 static const struct net_device_ops iavf_netdev_ops;
52 struct workqueue_struct *iavf_wq;
53
54 int iavf_status_to_errno(enum iavf_status status)
55 {
56         switch (status) {
57         case IAVF_SUCCESS:
58                 return 0;
59         case IAVF_ERR_PARAM:
60         case IAVF_ERR_MAC_TYPE:
61         case IAVF_ERR_INVALID_MAC_ADDR:
62         case IAVF_ERR_INVALID_LINK_SETTINGS:
63         case IAVF_ERR_INVALID_PD_ID:
64         case IAVF_ERR_INVALID_QP_ID:
65         case IAVF_ERR_INVALID_CQ_ID:
66         case IAVF_ERR_INVALID_CEQ_ID:
67         case IAVF_ERR_INVALID_AEQ_ID:
68         case IAVF_ERR_INVALID_SIZE:
69         case IAVF_ERR_INVALID_ARP_INDEX:
70         case IAVF_ERR_INVALID_FPM_FUNC_ID:
71         case IAVF_ERR_QP_INVALID_MSG_SIZE:
72         case IAVF_ERR_INVALID_FRAG_COUNT:
73         case IAVF_ERR_INVALID_ALIGNMENT:
74         case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
75         case IAVF_ERR_INVALID_IMM_DATA_SIZE:
76         case IAVF_ERR_INVALID_VF_ID:
77         case IAVF_ERR_INVALID_HMCFN_ID:
78         case IAVF_ERR_INVALID_PBLE_INDEX:
79         case IAVF_ERR_INVALID_SD_INDEX:
80         case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
81         case IAVF_ERR_INVALID_SD_TYPE:
82         case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
83         case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
84         case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
85                 return -EINVAL;
86         case IAVF_ERR_NVM:
87         case IAVF_ERR_NVM_CHECKSUM:
88         case IAVF_ERR_PHY:
89         case IAVF_ERR_CONFIG:
90         case IAVF_ERR_UNKNOWN_PHY:
91         case IAVF_ERR_LINK_SETUP:
92         case IAVF_ERR_ADAPTER_STOPPED:
93         case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
94         case IAVF_ERR_AUTONEG_NOT_COMPLETE:
95         case IAVF_ERR_RESET_FAILED:
96         case IAVF_ERR_BAD_PTR:
97         case IAVF_ERR_SWFW_SYNC:
98         case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
99         case IAVF_ERR_QUEUE_EMPTY:
100         case IAVF_ERR_FLUSHED_QUEUE:
101         case IAVF_ERR_OPCODE_MISMATCH:
102         case IAVF_ERR_CQP_COMPL_ERROR:
103         case IAVF_ERR_BACKING_PAGE_ERROR:
104         case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
105         case IAVF_ERR_MEMCPY_FAILED:
106         case IAVF_ERR_SRQ_ENABLED:
107         case IAVF_ERR_ADMIN_QUEUE_ERROR:
108         case IAVF_ERR_ADMIN_QUEUE_FULL:
109         case IAVF_ERR_BAD_IWARP_CQE:
110         case IAVF_ERR_NVM_BLANK_MODE:
111         case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
112         case IAVF_ERR_DIAG_TEST_FAILED:
113         case IAVF_ERR_FIRMWARE_API_VERSION:
114         case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
115                 return -EIO;
116         case IAVF_ERR_DEVICE_NOT_SUPPORTED:
117                 return -ENODEV;
118         case IAVF_ERR_NO_AVAILABLE_VSI:
119         case IAVF_ERR_RING_FULL:
120                 return -ENOSPC;
121         case IAVF_ERR_NO_MEMORY:
122                 return -ENOMEM;
123         case IAVF_ERR_TIMEOUT:
124         case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
125                 return -ETIMEDOUT;
126         case IAVF_ERR_NOT_IMPLEMENTED:
127         case IAVF_NOT_SUPPORTED:
128                 return -EOPNOTSUPP;
129         case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
130                 return -EALREADY;
131         case IAVF_ERR_NOT_READY:
132                 return -EBUSY;
133         case IAVF_ERR_BUF_TOO_SHORT:
134                 return -EMSGSIZE;
135         }
136
137         return -EIO;
138 }
139
140 int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
141 {
142         switch (v_status) {
143         case VIRTCHNL_STATUS_SUCCESS:
144                 return 0;
145         case VIRTCHNL_STATUS_ERR_PARAM:
146         case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
147                 return -EINVAL;
148         case VIRTCHNL_STATUS_ERR_NO_MEMORY:
149                 return -ENOMEM;
150         case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
151         case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
152         case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
153                 return -EIO;
154         case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
155                 return -EOPNOTSUPP;
156         }
157
158         return -EIO;
159 }
160
161 /**
162  * iavf_pdev_to_adapter - go from pci_dev to adapter
163  * @pdev: pci_dev pointer
164  */
165 static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
166 {
167         return netdev_priv(pci_get_drvdata(pdev));
168 }
169
170 /**
171  * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
172  * @hw:   pointer to the HW structure
173  * @mem:  ptr to mem struct to fill out
174  * @size: size of memory requested
175  * @alignment: what to align the allocation to
176  **/
177 enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
178                                          struct iavf_dma_mem *mem,
179                                          u64 size, u32 alignment)
180 {
181         struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
182
183         if (!mem)
184                 return IAVF_ERR_PARAM;
185
186         mem->size = ALIGN(size, alignment);
187         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
188                                      (dma_addr_t *)&mem->pa, GFP_KERNEL);
189         if (mem->va)
190                 return 0;
191         else
192                 return IAVF_ERR_NO_MEMORY;
193 }
194
195 /**
196  * iavf_free_dma_mem_d - OS specific memory free for shared code
197  * @hw:   pointer to the HW structure
198  * @mem:  ptr to mem struct to free
199  **/
200 enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
201                                      struct iavf_dma_mem *mem)
202 {
203         struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
204
205         if (!mem || !mem->va)
206                 return IAVF_ERR_PARAM;
207         dma_free_coherent(&adapter->pdev->dev, mem->size,
208                           mem->va, (dma_addr_t)mem->pa);
209         return 0;
210 }
211
212 /**
213  * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
214  * @hw:   pointer to the HW structure
215  * @mem:  ptr to mem struct to fill out
216  * @size: size of memory requested
217  **/
218 enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
219                                           struct iavf_virt_mem *mem, u32 size)
220 {
221         if (!mem)
222                 return IAVF_ERR_PARAM;
223
224         mem->size = size;
225         mem->va = kzalloc(size, GFP_KERNEL);
226
227         if (mem->va)
228                 return 0;
229         else
230                 return IAVF_ERR_NO_MEMORY;
231 }
232
233 /**
234  * iavf_free_virt_mem_d - OS specific memory free for shared code
235  * @hw:   pointer to the HW structure
236  * @mem:  ptr to mem struct to free
237  **/
238 enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
239                                       struct iavf_virt_mem *mem)
240 {
241         if (!mem)
242                 return IAVF_ERR_PARAM;
243
244         /* it's ok to kfree a NULL pointer */
245         kfree(mem->va);
246
247         return 0;
248 }
249
250 /**
251  * iavf_lock_timeout - try to lock mutex but give up after timeout
252  * @lock: mutex that should be locked
253  * @msecs: timeout in msecs
254  *
255  * Returns 0 on success, negative on failure
256  **/
257 int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
258 {
259         unsigned int wait, delay = 10;
260
261         for (wait = 0; wait < msecs; wait += delay) {
262                 if (mutex_trylock(lock))
263                         return 0;
264
265                 msleep(delay);
266         }
267
268         return -1;
269 }
270
271 /**
272  * iavf_schedule_reset - Set the flags and schedule a reset event
273  * @adapter: board private structure
274  **/
275 void iavf_schedule_reset(struct iavf_adapter *adapter)
276 {
277         if (!(adapter->flags &
278               (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
279                 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
280                 queue_work(iavf_wq, &adapter->reset_task);
281         }
282 }
283
284 /**
285  * iavf_schedule_request_stats - Set the flags and schedule statistics request
286  * @adapter: board private structure
287  *
288  * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
289  * request and refresh ethtool stats
290  **/
291 void iavf_schedule_request_stats(struct iavf_adapter *adapter)
292 {
293         adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
294         mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
295 }
296
297 /**
298  * iavf_tx_timeout - Respond to a Tx Hang
299  * @netdev: network interface device structure
300  * @txqueue: queue number that is timing out
301  **/
302 static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
303 {
304         struct iavf_adapter *adapter = netdev_priv(netdev);
305
306         adapter->tx_timeout_count++;
307         iavf_schedule_reset(adapter);
308 }
309
310 /**
311  * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
312  * @adapter: board private structure
313  **/
314 static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
315 {
316         struct iavf_hw *hw = &adapter->hw;
317
318         if (!adapter->msix_entries)
319                 return;
320
321         wr32(hw, IAVF_VFINT_DYN_CTL01, 0);
322
323         iavf_flush(hw);
324
325         synchronize_irq(adapter->msix_entries[0].vector);
326 }
327
328 /**
329  * iavf_misc_irq_enable - Enable default interrupt generation settings
330  * @adapter: board private structure
331  **/
332 static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
333 {
334         struct iavf_hw *hw = &adapter->hw;
335
336         wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
337                                        IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
338         wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
339
340         iavf_flush(hw);
341 }
342
343 /**
344  * iavf_irq_disable - Mask off interrupt generation on the NIC
345  * @adapter: board private structure
346  **/
347 static void iavf_irq_disable(struct iavf_adapter *adapter)
348 {
349         int i;
350         struct iavf_hw *hw = &adapter->hw;
351
352         if (!adapter->msix_entries)
353                 return;
354
355         for (i = 1; i < adapter->num_msix_vectors; i++) {
356                 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
357                 synchronize_irq(adapter->msix_entries[i].vector);
358         }
359         iavf_flush(hw);
360 }
361
362 /**
363  * iavf_irq_enable_queues - Enable interrupt for specified queues
364  * @adapter: board private structure
365  * @mask: bitmap of queues to enable
366  **/
367 void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
368 {
369         struct iavf_hw *hw = &adapter->hw;
370         int i;
371
372         for (i = 1; i < adapter->num_msix_vectors; i++) {
373                 if (mask & BIT(i - 1)) {
374                         wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
375                              IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
376                              IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
377                 }
378         }
379 }
380
381 /**
382  * iavf_irq_enable - Enable default interrupt generation settings
383  * @adapter: board private structure
384  * @flush: boolean value whether to run rd32()
385  **/
386 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
387 {
388         struct iavf_hw *hw = &adapter->hw;
389
390         iavf_misc_irq_enable(adapter);
391         iavf_irq_enable_queues(adapter, ~0);
392
393         if (flush)
394                 iavf_flush(hw);
395 }
396
397 /**
398  * iavf_msix_aq - Interrupt handler for vector 0
399  * @irq: interrupt number
400  * @data: pointer to netdev
401  **/
402 static irqreturn_t iavf_msix_aq(int irq, void *data)
403 {
404         struct net_device *netdev = data;
405         struct iavf_adapter *adapter = netdev_priv(netdev);
406         struct iavf_hw *hw = &adapter->hw;
407
408         /* handle non-queue interrupts, these reads clear the registers */
409         rd32(hw, IAVF_VFINT_ICR01);
410         rd32(hw, IAVF_VFINT_ICR0_ENA1);
411
412         if (adapter->state != __IAVF_REMOVE)
413                 /* schedule work on the private workqueue */
414                 queue_work(iavf_wq, &adapter->adminq_task);
415
416         return IRQ_HANDLED;
417 }
418
419 /**
420  * iavf_msix_clean_rings - MSIX mode Interrupt Handler
421  * @irq: interrupt number
422  * @data: pointer to a q_vector
423  **/
424 static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
425 {
426         struct iavf_q_vector *q_vector = data;
427
428         if (!q_vector->tx.ring && !q_vector->rx.ring)
429                 return IRQ_HANDLED;
430
431         napi_schedule_irqoff(&q_vector->napi);
432
433         return IRQ_HANDLED;
434 }
435
436 /**
437  * iavf_map_vector_to_rxq - associate irqs with rx queues
438  * @adapter: board private structure
439  * @v_idx: interrupt number
440  * @r_idx: queue number
441  **/
442 static void
443 iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
444 {
445         struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
446         struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
447         struct iavf_hw *hw = &adapter->hw;
448
449         rx_ring->q_vector = q_vector;
450         rx_ring->next = q_vector->rx.ring;
451         rx_ring->vsi = &adapter->vsi;
452         q_vector->rx.ring = rx_ring;
453         q_vector->rx.count++;
454         q_vector->rx.next_update = jiffies + 1;
455         q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
456         q_vector->ring_mask |= BIT(r_idx);
457         wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
458              q_vector->rx.current_itr >> 1);
459         q_vector->rx.current_itr = q_vector->rx.target_itr;
460 }
461
462 /**
463  * iavf_map_vector_to_txq - associate irqs with tx queues
464  * @adapter: board private structure
465  * @v_idx: interrupt number
466  * @t_idx: queue number
467  **/
468 static void
469 iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
470 {
471         struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
472         struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
473         struct iavf_hw *hw = &adapter->hw;
474
475         tx_ring->q_vector = q_vector;
476         tx_ring->next = q_vector->tx.ring;
477         tx_ring->vsi = &adapter->vsi;
478         q_vector->tx.ring = tx_ring;
479         q_vector->tx.count++;
480         q_vector->tx.next_update = jiffies + 1;
481         q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
482         q_vector->num_ringpairs++;
483         wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
484              q_vector->tx.target_itr >> 1);
485         q_vector->tx.current_itr = q_vector->tx.target_itr;
486 }
487
488 /**
489  * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
490  * @adapter: board private structure to initialize
491  *
492  * This function maps descriptor rings to the queue-specific vectors
493  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
494  * one vector per ring/queue, but on a constrained vector budget, we
495  * group the rings as "efficiently" as possible.  You would add new
496  * mapping configurations in here.
497  **/
498 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
499 {
500         int rings_remaining = adapter->num_active_queues;
501         int ridx = 0, vidx = 0;
502         int q_vectors;
503
504         q_vectors = adapter->num_msix_vectors - NONQ_VECS;
505
506         for (; ridx < rings_remaining; ridx++) {
507                 iavf_map_vector_to_rxq(adapter, vidx, ridx);
508                 iavf_map_vector_to_txq(adapter, vidx, ridx);
509
510                 /* In the case where we have more queues than vectors, continue
511                  * round-robin on vectors until all queues are mapped.
512                  */
513                 if (++vidx >= q_vectors)
514                         vidx = 0;
515         }
516
517         adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
518 }
519
520 /**
521  * iavf_irq_affinity_notify - Callback for affinity changes
522  * @notify: context as to what irq was changed
523  * @mask: the new affinity mask
524  *
525  * This is a callback function used by the irq_set_affinity_notifier function
526  * so that we may register to receive changes to the irq affinity masks.
527  **/
528 static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
529                                      const cpumask_t *mask)
530 {
531         struct iavf_q_vector *q_vector =
532                 container_of(notify, struct iavf_q_vector, affinity_notify);
533
534         cpumask_copy(&q_vector->affinity_mask, mask);
535 }
536
537 /**
538  * iavf_irq_affinity_release - Callback for affinity notifier release
539  * @ref: internal core kernel usage
540  *
541  * This is a callback function used by the irq_set_affinity_notifier function
542  * to inform the current notification subscriber that they will no longer
543  * receive notifications.
544  **/
545 static void iavf_irq_affinity_release(struct kref *ref) {}
546
547 /**
548  * iavf_request_traffic_irqs - Initialize MSI-X interrupts
549  * @adapter: board private structure
550  * @basename: device basename
551  *
552  * Allocates MSI-X vectors for tx and rx handling, and requests
553  * interrupts from the kernel.
554  **/
555 static int
556 iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
557 {
558         unsigned int vector, q_vectors;
559         unsigned int rx_int_idx = 0, tx_int_idx = 0;
560         int irq_num, err;
561         int cpu;
562
563         iavf_irq_disable(adapter);
564         /* Decrement for Other and TCP Timer vectors */
565         q_vectors = adapter->num_msix_vectors - NONQ_VECS;
566
567         for (vector = 0; vector < q_vectors; vector++) {
568                 struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];
569
570                 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
571
572                 if (q_vector->tx.ring && q_vector->rx.ring) {
573                         snprintf(q_vector->name, sizeof(q_vector->name),
574                                  "iavf-%s-TxRx-%u", basename, rx_int_idx++);
575                         tx_int_idx++;
576                 } else if (q_vector->rx.ring) {
577                         snprintf(q_vector->name, sizeof(q_vector->name),
578                                  "iavf-%s-rx-%u", basename, rx_int_idx++);
579                 } else if (q_vector->tx.ring) {
580                         snprintf(q_vector->name, sizeof(q_vector->name),
581                                  "iavf-%s-tx-%u", basename, tx_int_idx++);
582                 } else {
583                         /* skip this unused q_vector */
584                         continue;
585                 }
586                 err = request_irq(irq_num,
587                                   iavf_msix_clean_rings,
588                                   0,
589                                   q_vector->name,
590                                   q_vector);
591                 if (err) {
592                         dev_info(&adapter->pdev->dev,
593                                  "Request_irq failed, error: %d\n", err);
594                         goto free_queue_irqs;
595                 }
596                 /* register for affinity change notifications */
597                 q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
598                 q_vector->affinity_notify.release =
599                                                    iavf_irq_affinity_release;
600                 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
601                 /* Spread the IRQ affinity hints across online CPUs. Note that
602                  * get_cpu_mask returns a mask with a permanent lifetime so
603                  * it's safe to use as a hint for irq_update_affinity_hint.
604                  */
605                 cpu = cpumask_local_spread(q_vector->v_idx, -1);
606                 irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
607         }
608
609         return 0;
610
611 free_queue_irqs:
612         while (vector) {
613                 vector--;
614                 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
615                 irq_set_affinity_notifier(irq_num, NULL);
616                 irq_update_affinity_hint(irq_num, NULL);
617                 free_irq(irq_num, &adapter->q_vectors[vector]);
618         }
619         return err;
620 }
621
622 /**
623  * iavf_request_misc_irq - Initialize MSI-X interrupts
624  * @adapter: board private structure
625  *
626  * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
627  * vector is only for the admin queue, and stays active even when the netdev
628  * is closed.
629  **/
630 static int iavf_request_misc_irq(struct iavf_adapter *adapter)
631 {
632         struct net_device *netdev = adapter->netdev;
633         int err;
634
635         snprintf(adapter->misc_vector_name,
636                  sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
637                  dev_name(&adapter->pdev->dev));
638         err = request_irq(adapter->msix_entries[0].vector,
639                           &iavf_msix_aq, 0,
640                           adapter->misc_vector_name, netdev);
641         if (err) {
642                 dev_err(&adapter->pdev->dev,
643                         "request_irq for %s failed: %d\n",
644                         adapter->misc_vector_name, err);
645                 free_irq(adapter->msix_entries[0].vector, netdev);
646         }
647         return err;
648 }
649
650 /**
651  * iavf_free_traffic_irqs - Free MSI-X interrupts
652  * @adapter: board private structure
653  *
654  * Frees all MSI-X vectors other than 0.
655  **/
656 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
657 {
658         int vector, irq_num, q_vectors;
659
660         if (!adapter->msix_entries)
661                 return;
662
663         q_vectors = adapter->num_msix_vectors - NONQ_VECS;
664
665         for (vector = 0; vector < q_vectors; vector++) {
666                 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
667                 irq_set_affinity_notifier(irq_num, NULL);
668                 irq_update_affinity_hint(irq_num, NULL);
669                 free_irq(irq_num, &adapter->q_vectors[vector]);
670         }
671 }
672
673 /**
674  * iavf_free_misc_irq - Free MSI-X miscellaneous vector
675  * @adapter: board private structure
676  *
677  * Frees MSI-X vector 0.
678  **/
679 static void iavf_free_misc_irq(struct iavf_adapter *adapter)
680 {
681         struct net_device *netdev = adapter->netdev;
682
683         if (!adapter->msix_entries)
684                 return;
685
686         free_irq(adapter->msix_entries[0].vector, netdev);
687 }
688
689 /**
690  * iavf_configure_tx - Configure Transmit Unit after Reset
691  * @adapter: board private structure
692  *
693  * Configure the Tx unit of the MAC after a reset.
694  **/
695 static void iavf_configure_tx(struct iavf_adapter *adapter)
696 {
697         struct iavf_hw *hw = &adapter->hw;
698         int i;
699
700         for (i = 0; i < adapter->num_active_queues; i++)
701                 adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
702 }
703
704 /**
705  * iavf_configure_rx - Configure Receive Unit after Reset
706  * @adapter: board private structure
707  *
708  * Configure the Rx unit of the MAC after a reset.
709  **/
710 static void iavf_configure_rx(struct iavf_adapter *adapter)
711 {
712         unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
713         struct iavf_hw *hw = &adapter->hw;
714         int i;
715
716         /* Legacy Rx will always default to a 2048 buffer size. */
717 #if (PAGE_SIZE < 8192)
718         if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
719                 struct net_device *netdev = adapter->netdev;
720
721                 /* For jumbo frames on systems with 4K pages we have to use
722                  * an order 1 page, so we might as well increase the size
723                  * of our Rx buffer to make better use of the available space
724                  */
725                 rx_buf_len = IAVF_RXBUFFER_3072;
726
727                 /* We use a 1536 buffer size for configurations with
728                  * standard Ethernet mtu.  On x86 this gives us enough room
729                  * for shared info and 192 bytes of padding.
730                  */
731                 if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
732                     (netdev->mtu <= ETH_DATA_LEN))
733                         rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
734         }
735 #endif
736
737         for (i = 0; i < adapter->num_active_queues; i++) {
738                 adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
739                 adapter->rx_rings[i].rx_buf_len = rx_buf_len;
740
741                 if (adapter->flags & IAVF_FLAG_LEGACY_RX)
742                         clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
743                 else
744                         set_ring_build_skb_enabled(&adapter->rx_rings[i]);
745         }
746 }
747
748 /**
749  * iavf_find_vlan - Search filter list for specific vlan filter
750  * @adapter: board private structure
751  * @vlan: vlan tag
752  *
753  * Returns ptr to the filter object or NULL. Must be called while holding the
754  * mac_vlan_list_lock.
755  **/
756 static struct
757 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
758                                  struct iavf_vlan vlan)
759 {
760         struct iavf_vlan_filter *f;
761
762         list_for_each_entry(f, &adapter->vlan_filter_list, list) {
763                 if (f->vlan.vid == vlan.vid &&
764                     f->vlan.tpid == vlan.tpid)
765                         return f;
766         }
767
768         return NULL;
769 }
770
771 /**
772  * iavf_add_vlan - Add a vlan filter to the list
773  * @adapter: board private structure
774  * @vlan: VLAN tag
775  *
776  * Returns ptr to the filter object or NULL when no memory available.
777  **/
778 static struct
779 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
780                                 struct iavf_vlan vlan)
781 {
782         struct iavf_vlan_filter *f = NULL;
783
784         spin_lock_bh(&adapter->mac_vlan_list_lock);
785
786         f = iavf_find_vlan(adapter, vlan);
787         if (!f) {
788                 f = kzalloc(sizeof(*f), GFP_ATOMIC);
789                 if (!f)
790                         goto clearout;
791
792                 f->vlan = vlan;
793
794                 list_add_tail(&f->list, &adapter->vlan_filter_list);
795                 f->add = true;
796                 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
797         }
798
799 clearout:
800         spin_unlock_bh(&adapter->mac_vlan_list_lock);
801         return f;
802 }
803
804 /**
805  * iavf_del_vlan - Remove a vlan filter from the list
806  * @adapter: board private structure
807  * @vlan: VLAN tag
808  **/
809 static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
810 {
811         struct iavf_vlan_filter *f;
812
813         spin_lock_bh(&adapter->mac_vlan_list_lock);
814
815         f = iavf_find_vlan(adapter, vlan);
816         if (f) {
817                 f->remove = true;
818                 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
819         }
820
821         spin_unlock_bh(&adapter->mac_vlan_list_lock);
822 }
823
824 /**
825  * iavf_restore_filters
826  * @adapter: board private structure
827  *
828  * Restore existing non MAC filters when VF netdev comes back up
829  **/
830 static void iavf_restore_filters(struct iavf_adapter *adapter)
831 {
832         u16 vid;
833
834         /* re-add all VLAN filters */
835         for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
836                 iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));
837
838         for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
839                 iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
840 }
841
842 /**
843  * iavf_get_num_vlans_added - get number of VLANs added
844  * @adapter: board private structure
845  */
846 u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
847 {
848         return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
849                 bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
850 }
851
852 /**
853  * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
854  * @adapter: board private structure
855  *
856  * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
857  * do not impose a limit as that maintains current behavior and for
858  * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF.
859  **/
860 static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
861 {
862         /* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
863          * never been a limit on the VF driver side
864          */
865         if (VLAN_ALLOWED(adapter))
866                 return VLAN_N_VID;
867         else if (VLAN_V2_ALLOWED(adapter))
868                 return adapter->vlan_v2_caps.filtering.max_filters;
869
870         return 0;
871 }
872
873 /**
874  * iavf_max_vlans_added - check if maximum VLANs allowed already exist
875  * @adapter: board private structure
876  **/
877 static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
878 {
879         if (iavf_get_num_vlans_added(adapter) <
880             iavf_get_max_vlans_allowed(adapter))
881                 return false;
882
883         return true;
884 }
885
886 /**
887  * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
888  * @netdev: network device struct
889  * @proto: unused protocol data
890  * @vid: VLAN tag
891  **/
892 static int iavf_vlan_rx_add_vid(struct net_device *netdev,
893                                 __always_unused __be16 proto, u16 vid)
894 {
895         struct iavf_adapter *adapter = netdev_priv(netdev);
896
897         if (!VLAN_FILTERING_ALLOWED(adapter))
898                 return -EIO;
899
900         if (iavf_max_vlans_added(adapter)) {
901                 netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
902                            iavf_get_max_vlans_allowed(adapter));
903                 return -EIO;
904         }
905
906         if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
907                 return -ENOMEM;
908
909         return 0;
910 }
911
912 /**
913  * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
914  * @netdev: network device struct
915  * @proto: unused protocol data
916  * @vid: VLAN tag
917  **/
918 static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
919                                  __always_unused __be16 proto, u16 vid)
920 {
921         struct iavf_adapter *adapter = netdev_priv(netdev);
922
923         iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
924         if (proto == cpu_to_be16(ETH_P_8021Q))
925                 clear_bit(vid, adapter->vsi.active_cvlans);
926         else
927                 clear_bit(vid, adapter->vsi.active_svlans);
928
929         return 0;
930 }
931
932 /**
933  * iavf_find_filter - Search filter list for specific mac filter
934  * @adapter: board private structure
935  * @macaddr: the MAC address
936  *
937  * Returns ptr to the filter object or NULL. Must be called while holding the
938  * mac_vlan_list_lock.
939  **/
940 static struct
941 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
942                                   const u8 *macaddr)
943 {
944         struct iavf_mac_filter *f;
945
946         if (!macaddr)
947                 return NULL;
948
949         list_for_each_entry(f, &adapter->mac_filter_list, list) {
950                 if (ether_addr_equal(macaddr, f->macaddr))
951                         return f;
952         }
953         return NULL;
954 }
955
956 /**
957  * iavf_add_filter - Add a mac filter to the filter list
958  * @adapter: board private structure
959  * @macaddr: the MAC address
960  *
961  * Returns ptr to the filter object or NULL when no memory available.
962  **/
963 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
964                                         const u8 *macaddr)
965 {
966         struct iavf_mac_filter *f;
967
968         if (!macaddr)
969                 return NULL;
970
971         f = iavf_find_filter(adapter, macaddr);
972         if (!f) {
973                 f = kzalloc(sizeof(*f), GFP_ATOMIC);
974                 if (!f)
975                         return f;
976
977                 ether_addr_copy(f->macaddr, macaddr);
978
979                 list_add_tail(&f->list, &adapter->mac_filter_list);
980                 f->add = true;
981                 f->add_handled = false;
982                 f->is_new_mac = true;
983                 f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
984                 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
985         } else {
986                 f->remove = false;
987         }
988
989         return f;
990 }
991
992 /**
993  * iavf_replace_primary_mac - Replace current primary address
994  * @adapter: board private structure
995  * @new_mac: new MAC address to be applied
996  *
997  * Replace current dev_addr and send request to PF for removal of previous
998  * primary MAC address filter and addition of new primary MAC filter.
999  * Return 0 for success, -ENOMEM for failure.
1000  *
1001  * Do not call this with mac_vlan_list_lock!
1002  **/
1003 int iavf_replace_primary_mac(struct iavf_adapter *adapter,
1004                              const u8 *new_mac)
1005 {
1006         struct iavf_hw *hw = &adapter->hw;
1007         struct iavf_mac_filter *f;
1008
1009         spin_lock_bh(&adapter->mac_vlan_list_lock);
1010
1011         list_for_each_entry(f, &adapter->mac_filter_list, list) {
1012                 f->is_primary = false;
1013         }
1014
1015         f = iavf_find_filter(adapter, hw->mac.addr);
1016         if (f) {
1017                 f->remove = true;
1018                 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1019         }
1020
1021         f = iavf_add_filter(adapter, new_mac);
1022
1023         if (f) {
1024                 /* Always send the request to add if changing primary MAC
1025                  * even if filter is already present on the list
1026                  */
1027                 f->is_primary = true;
1028                 f->add = true;
1029                 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
1030                 ether_addr_copy(hw->mac.addr, new_mac);
1031         }
1032
1033         spin_unlock_bh(&adapter->mac_vlan_list_lock);
1034
1035         /* schedule the watchdog task to immediately process the request */
1036         if (f) {
1037                 queue_work(iavf_wq, &adapter->watchdog_task.work);
1038                 return 0;
1039         }
1040         return -ENOMEM;
1041 }
1042
1043 /**
1044  * iavf_is_mac_set_handled - wait for a response to set MAC from PF
1045  * @netdev: network interface device structure
1046  * @macaddr: MAC address to set
1047  *
1048  * Returns true on success, false on failure
1049  */
1050 static bool iavf_is_mac_set_handled(struct net_device *netdev,
1051                                     const u8 *macaddr)
1052 {
1053         struct iavf_adapter *adapter = netdev_priv(netdev);
1054         struct iavf_mac_filter *f;
1055         bool ret = false;
1056
1057         spin_lock_bh(&adapter->mac_vlan_list_lock);
1058
1059         f = iavf_find_filter(adapter, macaddr);
1060
1061         if (!f || (!f->add && f->add_handled))
1062                 ret = true;
1063
1064         spin_unlock_bh(&adapter->mac_vlan_list_lock);
1065
1066         return ret;
1067 }
1068
1069 /**
1070  * iavf_set_mac - NDO callback to set port MAC address
1071  * @netdev: network interface device structure
1072  * @p: pointer to an address structure
1073  *
1074  * Returns 0 on success, negative on failure
1075  */
1076 static int iavf_set_mac(struct net_device *netdev, void *p)
1077 {
1078         struct iavf_adapter *adapter = netdev_priv(netdev);
1079         struct sockaddr *addr = p;
1080         int ret;
1081
1082         if (!is_valid_ether_addr(addr->sa_data))
1083                 return -EADDRNOTAVAIL;
1084
1085         ret = iavf_replace_primary_mac(adapter, addr->sa_data);
1086
1087         if (ret)
1088                 return ret;
1089
1090         /* If this is an initial set MAC during VF spawn do not wait */
1091         if (adapter->flags & IAVF_FLAG_INITIAL_MAC_SET) {
1092                 adapter->flags &= ~IAVF_FLAG_INITIAL_MAC_SET;
1093                 return 0;
1094         }
1095
1096         ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
1097                                                iavf_is_mac_set_handled(netdev, addr->sa_data),
1098                                                msecs_to_jiffies(2500));
1099
1100         /* If ret < 0 then it means wait was interrupted.
1101          * If ret == 0 then it means we got a timeout.
1102          * else it means we got response for set MAC from PF,
1103          * check if netdev MAC was updated to requested MAC,
1104          * if yes then set MAC succeeded otherwise it failed return -EACCES
1105          */
1106         if (ret < 0)
1107                 return ret;
1108
1109         if (!ret)
1110                 return -EAGAIN;
1111
1112         if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
1113                 return -EACCES;
1114
1115         return 0;
1116 }
1117
1118 /**
1119  * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
1120  * @netdev: the netdevice
1121  * @addr: address to add
1122  *
1123  * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1124  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1125  */
1126 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
1127 {
1128         struct iavf_adapter *adapter = netdev_priv(netdev);
1129
1130         if (iavf_add_filter(adapter, addr))
1131                 return 0;
1132         else
1133                 return -ENOMEM;
1134 }
1135
1136 /**
1137  * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1138  * @netdev: the netdevice
1139  * @addr: address to add
1140  *
1141  * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1142  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1143  */
1144 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
1145 {
1146         struct iavf_adapter *adapter = netdev_priv(netdev);
1147         struct iavf_mac_filter *f;
1148
1149         /* Under some circumstances, we might receive a request to delete
1150          * our own device address from our uc list. Because we store the
1151          * device address in the VSI's MAC/VLAN filter list, we need to ignore
1152          * such requests and not delete our device address from this list.
1153          */
1154         if (ether_addr_equal(addr, netdev->dev_addr))
1155                 return 0;
1156
1157         f = iavf_find_filter(adapter, addr);
1158         if (f) {
1159                 f->remove = true;
1160                 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1161         }
1162         return 0;
1163 }
1164
1165 /**
1166  * iavf_set_rx_mode - NDO callback to set the netdev filters
1167  * @netdev: network interface device structure
1168  **/
1169 static void iavf_set_rx_mode(struct net_device *netdev)
1170 {
1171         struct iavf_adapter *adapter = netdev_priv(netdev);
1172
1173         spin_lock_bh(&adapter->mac_vlan_list_lock);
1174         __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
1175         __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
1176         spin_unlock_bh(&adapter->mac_vlan_list_lock);
1177
1178         if (netdev->flags & IFF_PROMISC &&
1179             !(adapter->flags & IAVF_FLAG_PROMISC_ON))
1180                 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
1181         else if (!(netdev->flags & IFF_PROMISC) &&
1182                  adapter->flags & IAVF_FLAG_PROMISC_ON)
1183                 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
1184
1185         if (netdev->flags & IFF_ALLMULTI &&
1186             !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
1187                 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
1188         else if (!(netdev->flags & IFF_ALLMULTI) &&
1189                  adapter->flags & IAVF_FLAG_ALLMULTI_ON)
1190                 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
1191 }
1192
1193 /**
1194  * iavf_napi_enable_all - enable NAPI on all queue vectors
1195  * @adapter: board private structure
1196  **/
1197 static void iavf_napi_enable_all(struct iavf_adapter *adapter)
1198 {
1199         int q_idx;
1200         struct iavf_q_vector *q_vector;
1201         int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1202
1203         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1204                 struct napi_struct *napi;
1205
1206                 q_vector = &adapter->q_vectors[q_idx];
1207                 napi = &q_vector->napi;
1208                 napi_enable(napi);
1209         }
1210 }
1211
1212 /**
1213  * iavf_napi_disable_all - disable NAPI on all queue vectors
1214  * @adapter: board private structure
1215  **/
1216 static void iavf_napi_disable_all(struct iavf_adapter *adapter)
1217 {
1218         int q_idx;
1219         struct iavf_q_vector *q_vector;
1220         int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1221
1222         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1223                 q_vector = &adapter->q_vectors[q_idx];
1224                 napi_disable(&q_vector->napi);
1225         }
1226 }
1227
1228 /**
1229  * iavf_configure - set up transmit and receive data structures
1230  * @adapter: board private structure
1231  **/
1232 static void iavf_configure(struct iavf_adapter *adapter)
1233 {
1234         struct net_device *netdev = adapter->netdev;
1235         int i;
1236
1237         iavf_set_rx_mode(netdev);
1238
1239         iavf_configure_tx(adapter);
1240         iavf_configure_rx(adapter);
1241         adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;
1242
1243         for (i = 0; i < adapter->num_active_queues; i++) {
1244                 struct iavf_ring *ring = &adapter->rx_rings[i];
1245
1246                 iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
1247         }
1248 }
1249
1250 /**
1251  * iavf_up_complete - Finish the last steps of bringing up a connection
1252  * @adapter: board private structure
1253  *
1254  * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
1255  **/
1256 static void iavf_up_complete(struct iavf_adapter *adapter)
1257 {
1258         iavf_change_state(adapter, __IAVF_RUNNING);
1259         clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1260
1261         iavf_napi_enable_all(adapter);
1262
1263         adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
1264         if (CLIENT_ENABLED(adapter))
1265                 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
1266         mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
1267 }
1268
1269 /**
1270  * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF
1271  * yet and mark other to be removed.
1272  * @adapter: board private structure
1273  **/
1274 static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
1275 {
1276         struct iavf_vlan_filter *vlf, *vlftmp;
1277         struct iavf_mac_filter *f, *ftmp;
1278
1279         spin_lock_bh(&adapter->mac_vlan_list_lock);
1280         /* clear the sync flag on all filters */
1281         __dev_uc_unsync(adapter->netdev, NULL);
1282         __dev_mc_unsync(adapter->netdev, NULL);
1283
1284         /* remove all MAC filters */
1285         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
1286                                  list) {
1287                 if (f->add) {
1288                         list_del(&f->list);
1289                         kfree(f);
1290                 } else {
1291                         f->remove = true;
1292                 }
1293         }
1294
1295         /* remove all VLAN filters */
1296         list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
1297                                  list) {
1298                 if (vlf->add) {
1299                         list_del(&vlf->list);
1300                         kfree(vlf);
1301                 } else {
1302                         vlf->remove = true;
1303                 }
1304         }
1305         spin_unlock_bh(&adapter->mac_vlan_list_lock);
1306 }
1307
1308 /**
1309  * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and
1310  * mark other to be removed.
1311  * @adapter: board private structure
1312  **/
1313 static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
1314 {
1315         struct iavf_cloud_filter *cf, *cftmp;
1316
1317         /* remove all cloud filters */
1318         spin_lock_bh(&adapter->cloud_filter_list_lock);
1319         list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
1320                                  list) {
1321                 if (cf->add) {
1322                         list_del(&cf->list);
1323                         kfree(cf);
1324                         adapter->num_cloud_filters--;
1325                 } else {
1326                         cf->del = true;
1327                 }
1328         }
1329         spin_unlock_bh(&adapter->cloud_filter_list_lock);
1330 }
1331
1332 /**
1333  * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark
1334  * other to be removed.
1335  * @adapter: board private structure
1336  **/
1337 static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
1338 {
1339         struct iavf_fdir_fltr *fdir, *fdirtmp;
1340
1341         /* remove all Flow Director filters */
1342         spin_lock_bh(&adapter->fdir_fltr_lock);
1343         list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
1344                                  list) {
1345                 if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
1346                         list_del(&fdir->list);
1347                         kfree(fdir);
1348                         adapter->fdir_active_fltr--;
1349                 } else {
1350                         fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
1351                 }
1352         }
1353         spin_unlock_bh(&adapter->fdir_fltr_lock);
1354 }
1355
1356 /**
1357  * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark
1358  * other to be removed.
1359  * @adapter: board private structure
1360  **/
1361 static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
1362 {
1363         struct iavf_adv_rss *rss, *rsstmp;
1364
1365         /* remove all advance RSS configuration */
1366         spin_lock_bh(&adapter->adv_rss_lock);
1367         list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
1368                                  list) {
1369                 if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
1370                         list_del(&rss->list);
1371                         kfree(rss);
1372                 } else {
1373                         rss->state = IAVF_ADV_RSS_DEL_REQUEST;
1374                 }
1375         }
1376         spin_unlock_bh(&adapter->adv_rss_lock);
1377 }
1378
1379 /**
1380  * iavf_down - Shutdown the connection processing
1381  * @adapter: board private structure
1382  *
1383  * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
1384  **/
1385 void iavf_down(struct iavf_adapter *adapter)
1386 {
1387         struct net_device *netdev = adapter->netdev;
1388
1389         if (adapter->state <= __IAVF_DOWN_PENDING)
1390                 return;
1391
1392         netif_carrier_off(netdev);
1393         netif_tx_disable(netdev);
1394         adapter->link_up = false;
1395         iavf_napi_disable_all(adapter);
1396         iavf_irq_disable(adapter);
1397
1398         iavf_clear_mac_vlan_filters(adapter);
1399         iavf_clear_cloud_filters(adapter);
1400         iavf_clear_fdir_filters(adapter);
1401         iavf_clear_adv_rss_conf(adapter);
1402
1403         if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
1404                 /* cancel any current operation */
1405                 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1406                 /* Schedule operations to close down the HW. Don't wait
1407                  * here for this to complete. The watchdog is still running
1408                  * and it will take care of this.
1409                  */
1410                 if (!list_empty(&adapter->mac_filter_list))
1411                         adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1412                 if (!list_empty(&adapter->vlan_filter_list))
1413                         adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
1414                 if (!list_empty(&adapter->cloud_filter_list))
1415                         adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1416                 if (!list_empty(&adapter->fdir_list_head))
1417                         adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1418                 if (!list_empty(&adapter->adv_rss_list_head))
1419                         adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
1420                 adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
1421         }
1422
1423         mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
1424 }
1425
1426 /**
1427  * iavf_acquire_msix_vectors - Setup the MSIX capability
1428  * @adapter: board private structure
1429  * @vectors: number of vectors to request
1430  *
1431  * Work with the OS to set up the MSIX vectors needed.
1432  *
1433  * Returns 0 on success, negative on failure
1434  **/
1435 static int
1436 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
1437 {
1438         int err, vector_threshold;
1439
1440         /* We'll want at least 3 (vector_threshold):
1441          * 0) Other (Admin Queue and link, mostly)
1442          * 1) TxQ[0] Cleanup
1443          * 2) RxQ[0] Cleanup
1444          */
1445         vector_threshold = MIN_MSIX_COUNT;
1446
1447         /* The more we get, the more we will assign to Tx/Rx Cleanup
1448          * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1449          * Right now, we simply care about how many we'll get; we'll
1450          * set them up later while requesting irq's.
1451          */
1452         err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1453                                     vector_threshold, vectors);
1454         if (err < 0) {
1455                 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
1456                 kfree(adapter->msix_entries);
1457                 adapter->msix_entries = NULL;
1458                 return err;
1459         }
1460
1461         /* Adjust for only the vectors we'll use, which is minimum
1462          * of max_msix_q_vectors + NONQ_VECS, or the number of
1463          * vectors we were allocated.
1464          */
1465         adapter->num_msix_vectors = err;
1466         return 0;
1467 }
1468
1469 /**
1470  * iavf_free_queues - Free memory for all rings
1471  * @adapter: board private structure to initialize
1472  *
1473  * Free all of the memory associated with queue pairs.
1474  **/
1475 static void iavf_free_queues(struct iavf_adapter *adapter)
1476 {
1477         if (!adapter->vsi_res)
1478                 return;
1479         adapter->num_active_queues = 0;
1480         kfree(adapter->tx_rings);
1481         adapter->tx_rings = NULL;
1482         kfree(adapter->rx_rings);
1483         adapter->rx_rings = NULL;
1484 }
1485
1486 /**
1487  * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
1488  * @adapter: board private structure
1489  *
1490  * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
1491  * stripped in certain descriptor fields. Instead of checking the offload
1492  * capability bits in the hot path, cache the location the ring specific
1493  * flags.
1494  */
1495 void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
1496 {
1497         int i;
1498
1499         for (i = 0; i < adapter->num_active_queues; i++) {
1500                 struct iavf_ring *tx_ring = &adapter->tx_rings[i];
1501                 struct iavf_ring *rx_ring = &adapter->rx_rings[i];
1502
1503                 /* prevent multiple L2TAG bits being set after VFR */
1504                 tx_ring->flags &=
1505                         ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1506                           IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
1507                 rx_ring->flags &=
1508                         ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1509                           IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);
1510
1511                 if (VLAN_ALLOWED(adapter)) {
1512                         tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1513                         rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1514                 } else if (VLAN_V2_ALLOWED(adapter)) {
1515                         struct virtchnl_vlan_supported_caps *stripping_support;
1516                         struct virtchnl_vlan_supported_caps *insertion_support;
1517
1518                         stripping_support =
1519                                 &adapter->vlan_v2_caps.offloads.stripping_support;
1520                         insertion_support =
1521                                 &adapter->vlan_v2_caps.offloads.insertion_support;
1522
1523                         if (stripping_support->outer) {
1524                                 if (stripping_support->outer &
1525                                     VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1526                                         rx_ring->flags |=
1527                                                 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1528                                 else if (stripping_support->outer &
1529                                          VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1530                                         rx_ring->flags |=
1531                                                 IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1532                         } else if (stripping_support->inner) {
1533                                 if (stripping_support->inner &
1534                                     VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1535                                         rx_ring->flags |=
1536                                                 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1537                                 else if (stripping_support->inner &
1538                                          VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1539                                         rx_ring->flags |=
1540                                                 IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1541                         }
1542
1543                         if (insertion_support->outer) {
1544                                 if (insertion_support->outer &
1545                                     VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1546                                         tx_ring->flags |=
1547                                                 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1548                                 else if (insertion_support->outer &
1549                                          VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1550                                         tx_ring->flags |=
1551                                                 IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1552                         } else if (insertion_support->inner) {
1553                                 if (insertion_support->inner &
1554                                     VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1555                                         tx_ring->flags |=
1556                                                 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1557                                 else if (insertion_support->inner &
1558                                          VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1559                                         tx_ring->flags |=
1560                                                 IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1561                         }
1562                 }
1563         }
1564 }
1565
1566 /**
1567  * iavf_alloc_queues - Allocate memory for all rings
1568  * @adapter: board private structure to initialize
1569  *
1570  * We allocate one ring per queue at run-time since we don't know the
1571  * number of queues at compile-time.  The polling_netdev array is
1572  * intended for Multiqueue, but should work fine with a single queue.
1573  **/
1574 static int iavf_alloc_queues(struct iavf_adapter *adapter)
1575 {
1576         int i, num_active_queues;
1577
1578         /* If we're in reset reallocating queues we don't actually know yet for
1579          * certain the PF gave us the number of queues we asked for but we'll
1580          * assume it did.  Once basic reset is finished we'll confirm once we
1581          * start negotiating config with PF.
1582          */
1583         if (adapter->num_req_queues)
1584                 num_active_queues = adapter->num_req_queues;
1585         else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1586                  adapter->num_tc)
1587                 num_active_queues = adapter->ch_config.total_qps;
1588         else
1589                 num_active_queues = min_t(int,
1590                                           adapter->vsi_res->num_queue_pairs,
1591                                           (int)(num_online_cpus()));
1592
1593
1594         adapter->tx_rings = kcalloc(num_active_queues,
1595                                     sizeof(struct iavf_ring), GFP_KERNEL);
1596         if (!adapter->tx_rings)
1597                 goto err_out;
1598         adapter->rx_rings = kcalloc(num_active_queues,
1599                                     sizeof(struct iavf_ring), GFP_KERNEL);
1600         if (!adapter->rx_rings)
1601                 goto err_out;
1602
1603         for (i = 0; i < num_active_queues; i++) {
1604                 struct iavf_ring *tx_ring;
1605                 struct iavf_ring *rx_ring;
1606
1607                 tx_ring = &adapter->tx_rings[i];
1608
1609                 tx_ring->queue_index = i;
1610                 tx_ring->netdev = adapter->netdev;
1611                 tx_ring->dev = &adapter->pdev->dev;
1612                 tx_ring->count = adapter->tx_desc_count;
1613                 tx_ring->itr_setting = IAVF_ITR_TX_DEF;
1614                 if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
1615                         tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;
1616
1617                 rx_ring = &adapter->rx_rings[i];
1618                 rx_ring->queue_index = i;
1619                 rx_ring->netdev = adapter->netdev;
1620                 rx_ring->dev = &adapter->pdev->dev;
1621                 rx_ring->count = adapter->rx_desc_count;
1622                 rx_ring->itr_setting = IAVF_ITR_RX_DEF;
1623         }
1624
1625         adapter->num_active_queues = num_active_queues;
1626
1627         iavf_set_queue_vlan_tag_loc(adapter);
1628
1629         return 0;
1630
1631 err_out:
1632         iavf_free_queues(adapter);
1633         return -ENOMEM;
1634 }
1635
1636 /**
1637  * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
1638  * @adapter: board private structure to initialize
1639  *
1640  * Attempt to configure the interrupts using the best available
1641  * capabilities of the hardware and the kernel.
1642  **/
1643 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
1644 {
1645         int vector, v_budget;
1646         int pairs = 0;
1647         int err = 0;
1648
1649         if (!adapter->vsi_res) {
1650                 err = -EIO;
1651                 goto out;
1652         }
1653         pairs = adapter->num_active_queues;
1654
1655         /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
1656          * us much good if we have more vectors than CPUs. However, we already
1657          * limit the total number of queues by the number of CPUs so we do not
1658          * need any further limiting here.
1659          */
1660         v_budget = min_t(int, pairs + NONQ_VECS,
1661                          (int)adapter->vf_res->max_vectors);
1662
1663         adapter->msix_entries = kcalloc(v_budget,
1664                                         sizeof(struct msix_entry), GFP_KERNEL);
1665         if (!adapter->msix_entries) {
1666                 err = -ENOMEM;
1667                 goto out;
1668         }
1669
1670         for (vector = 0; vector < v_budget; vector++)
1671                 adapter->msix_entries[vector].entry = vector;
1672
1673         err = iavf_acquire_msix_vectors(adapter, v_budget);
1674
1675 out:
1676         netif_set_real_num_rx_queues(adapter->netdev, pairs);
1677         netif_set_real_num_tx_queues(adapter->netdev, pairs);
1678         return err;
1679 }
1680
1681 /**
1682  * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
1683  * @adapter: board private structure
1684  *
1685  * Return 0 on success, negative on failure
1686  **/
1687 static int iavf_config_rss_aq(struct iavf_adapter *adapter)
1688 {
1689         struct iavf_aqc_get_set_rss_key_data *rss_key =
1690                 (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
1691         struct iavf_hw *hw = &adapter->hw;
1692         enum iavf_status status;
1693
1694         if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1695                 /* bail because we already have a command pending */
1696                 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
1697                         adapter->current_op);
1698                 return -EBUSY;
1699         }
1700
1701         status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
1702         if (status) {
1703                 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
1704                         iavf_stat_str(hw, status),
1705                         iavf_aq_str(hw, hw->aq.asq_last_status));
1706                 return iavf_status_to_errno(status);
1707
1708         }
1709
1710         status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
1711                                      adapter->rss_lut, adapter->rss_lut_size);
1712         if (status) {
1713                 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
1714                         iavf_stat_str(hw, status),
1715                         iavf_aq_str(hw, hw->aq.asq_last_status));
1716                 return iavf_status_to_errno(status);
1717         }
1718
1719         return 0;
1720
1721 }
1722
1723 /**
1724  * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
1725  * @adapter: board private structure
1726  *
1727  * Returns 0 on success, negative on failure
1728  **/
1729 static int iavf_config_rss_reg(struct iavf_adapter *adapter)
1730 {
1731         struct iavf_hw *hw = &adapter->hw;
1732         u32 *dw;
1733         u16 i;
1734
1735         dw = (u32 *)adapter->rss_key;
1736         for (i = 0; i <= adapter->rss_key_size / 4; i++)
1737                 wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
1738
1739         dw = (u32 *)adapter->rss_lut;
1740         for (i = 0; i <= adapter->rss_lut_size / 4; i++)
1741                 wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
1742
1743         iavf_flush(hw);
1744
1745         return 0;
1746 }
1747
1748 /**
1749  * iavf_config_rss - Configure RSS keys and lut
1750  * @adapter: board private structure
1751  *
1752  * Returns 0 on success, negative on failure
1753  **/
1754 int iavf_config_rss(struct iavf_adapter *adapter)
1755 {
1756
1757         if (RSS_PF(adapter)) {
1758                 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
1759                                         IAVF_FLAG_AQ_SET_RSS_KEY;
1760                 return 0;
1761         } else if (RSS_AQ(adapter)) {
1762                 return iavf_config_rss_aq(adapter);
1763         } else {
1764                 return iavf_config_rss_reg(adapter);
1765         }
1766 }
1767
1768 /**
1769  * iavf_fill_rss_lut - Fill the lut with default values
1770  * @adapter: board private structure
1771  **/
1772 static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
1773 {
1774         u16 i;
1775
1776         for (i = 0; i < adapter->rss_lut_size; i++)
1777                 adapter->rss_lut[i] = i % adapter->num_active_queues;
1778 }
1779
1780 /**
1781  * iavf_init_rss - Prepare for RSS
1782  * @adapter: board private structure
1783  *
1784  * Return 0 on success, negative on failure
1785  **/
1786 static int iavf_init_rss(struct iavf_adapter *adapter)
1787 {
1788         struct iavf_hw *hw = &adapter->hw;
1789
1790         if (!RSS_PF(adapter)) {
1791                 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1792                 if (adapter->vf_res->vf_cap_flags &
1793                     VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1794                         adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
1795                 else
1796                         adapter->hena = IAVF_DEFAULT_RSS_HENA;
1797
1798                 wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
1799                 wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
1800         }
1801
1802         iavf_fill_rss_lut(adapter);
1803         netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
1804
1805         return iavf_config_rss(adapter);
1806 }
1807
1808 /**
1809  * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
1810  * @adapter: board private structure to initialize
1811  *
1812  * We allocate one q_vector per queue interrupt.  If allocation fails we
1813  * return -ENOMEM.
1814  **/
1815 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
1816 {
1817         int q_idx = 0, num_q_vectors;
1818         struct iavf_q_vector *q_vector;
1819
1820         num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1821         adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
1822                                      GFP_KERNEL);
1823         if (!adapter->q_vectors)
1824                 return -ENOMEM;
1825
1826         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1827                 q_vector = &adapter->q_vectors[q_idx];
1828                 q_vector->adapter = adapter;
1829                 q_vector->vsi = &adapter->vsi;
1830                 q_vector->v_idx = q_idx;
1831                 q_vector->reg_idx = q_idx;
1832                 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
1833                 netif_napi_add(adapter->netdev, &q_vector->napi,
1834                                iavf_napi_poll);
1835         }
1836
1837         return 0;
1838 }
1839
1840 /**
1841  * iavf_free_q_vectors - Free memory allocated for interrupt vectors
1842  * @adapter: board private structure to initialize
1843  *
1844  * This function frees the memory allocated to the q_vectors.  In addition if
1845  * NAPI is enabled it will delete any references to the NAPI struct prior
1846  * to freeing the q_vector.
1847  **/
1848 static void iavf_free_q_vectors(struct iavf_adapter *adapter)
1849 {
1850         int q_idx, num_q_vectors;
1851         int napi_vectors;
1852
1853         if (!adapter->q_vectors)
1854                 return;
1855
1856         num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1857         napi_vectors = adapter->num_active_queues;
1858
1859         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1860                 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
1861
1862                 if (q_idx < napi_vectors)
1863                         netif_napi_del(&q_vector->napi);
1864         }
1865         kfree(adapter->q_vectors);
1866         adapter->q_vectors = NULL;
1867 }
1868
1869 /**
1870  * iavf_reset_interrupt_capability - Reset MSIX setup
1871  * @adapter: board private structure
1872  *
1873  **/
1874 void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
1875 {
1876         if (!adapter->msix_entries)
1877                 return;
1878
1879         pci_disable_msix(adapter->pdev);
1880         kfree(adapter->msix_entries);
1881         adapter->msix_entries = NULL;
1882 }
1883
1884 /**
1885  * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
1886  * @adapter: board private structure to initialize
1887  *
1888  **/
1889 int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
1890 {
1891         int err;
1892
1893         err = iavf_alloc_queues(adapter);
1894         if (err) {
1895                 dev_err(&adapter->pdev->dev,
1896                         "Unable to allocate memory for queues\n");
1897                 goto err_alloc_queues;
1898         }
1899
1900         rtnl_lock();
1901         err = iavf_set_interrupt_capability(adapter);
1902         rtnl_unlock();
1903         if (err) {
1904                 dev_err(&adapter->pdev->dev,
1905                         "Unable to setup interrupt capabilities\n");
1906                 goto err_set_interrupt;
1907         }
1908
1909         err = iavf_alloc_q_vectors(adapter);
1910         if (err) {
1911                 dev_err(&adapter->pdev->dev,
1912                         "Unable to allocate memory for queue vectors\n");
1913                 goto err_alloc_q_vectors;
1914         }
1915
1916         /* If we've made it so far while ADq flag being ON, then we haven't
1917          * bailed out anywhere in middle. And ADq isn't just enabled but actual
1918          * resources have been allocated in the reset path.
1919          * Now we can truly claim that ADq is enabled.
1920          */
1921         if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1922             adapter->num_tc)
1923                 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
1924                          adapter->num_tc);
1925
1926         dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
1927                  (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1928                  adapter->num_active_queues);
1929
1930         return 0;
1931 err_alloc_q_vectors:
1932         iavf_reset_interrupt_capability(adapter);
1933 err_set_interrupt:
1934         iavf_free_queues(adapter);
1935 err_alloc_queues:
1936         return err;
1937 }
1938
1939 /**
1940  * iavf_free_rss - Free memory used by RSS structs
1941  * @adapter: board private structure
1942  **/
1943 static void iavf_free_rss(struct iavf_adapter *adapter)
1944 {
1945         kfree(adapter->rss_key);
1946         adapter->rss_key = NULL;
1947
1948         kfree(adapter->rss_lut);
1949         adapter->rss_lut = NULL;
1950 }
1951
1952 /**
1953  * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
1954  * @adapter: board private structure
1955  *
1956  * Returns 0 on success, negative on failure
1957  **/
1958 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
1959 {
1960         struct net_device *netdev = adapter->netdev;
1961         int err;
1962
1963         if (netif_running(netdev))
1964                 iavf_free_traffic_irqs(adapter);
1965         iavf_free_misc_irq(adapter);
1966         iavf_reset_interrupt_capability(adapter);
1967         iavf_free_q_vectors(adapter);
1968         iavf_free_queues(adapter);
1969
1970         err =  iavf_init_interrupt_scheme(adapter);
1971         if (err)
1972                 goto err;
1973
1974         netif_tx_stop_all_queues(netdev);
1975
1976         err = iavf_request_misc_irq(adapter);
1977         if (err)
1978                 goto err;
1979
1980         set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1981
1982         iavf_map_rings_to_vectors(adapter);
1983 err:
1984         return err;
1985 }
1986
1987 /**
1988  * iavf_process_aq_command - process aq_required flags
1989  * and sends aq command
1990  * @adapter: pointer to iavf adapter structure
1991  *
1992  * Returns 0 on success
1993  * Returns error code if no command was sent
1994  * or error code if the command failed.
1995  **/
1996 static int iavf_process_aq_command(struct iavf_adapter *adapter)
1997 {
1998         if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
1999                 return iavf_send_vf_config_msg(adapter);
2000         if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
2001                 return iavf_send_vf_offload_vlan_v2_msg(adapter);
2002         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
2003                 iavf_disable_queues(adapter);
2004                 return 0;
2005         }
2006
2007         if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
2008                 iavf_map_queues(adapter);
2009                 return 0;
2010         }
2011
2012         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
2013                 iavf_add_ether_addrs(adapter);
2014                 return 0;
2015         }
2016
2017         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
2018                 iavf_add_vlans(adapter);
2019                 return 0;
2020         }
2021
2022         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
2023                 iavf_del_ether_addrs(adapter);
2024                 return 0;
2025         }
2026
2027         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
2028                 iavf_del_vlans(adapter);
2029                 return 0;
2030         }
2031
2032         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
2033                 iavf_enable_vlan_stripping(adapter);
2034                 return 0;
2035         }
2036
2037         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
2038                 iavf_disable_vlan_stripping(adapter);
2039                 return 0;
2040         }
2041
2042         if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
2043                 iavf_configure_queues(adapter);
2044                 return 0;
2045         }
2046
2047         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
2048                 iavf_enable_queues(adapter);
2049                 return 0;
2050         }
2051
2052         if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
2053                 /* This message goes straight to the firmware, not the
2054                  * PF, so we don't have to set current_op as we will
2055                  * not get a response through the ARQ.
2056                  */
2057                 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
2058                 return 0;
2059         }
2060         if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
2061                 iavf_get_hena(adapter);
2062                 return 0;
2063         }
2064         if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
2065                 iavf_set_hena(adapter);
2066                 return 0;
2067         }
2068         if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
2069                 iavf_set_rss_key(adapter);
2070                 return 0;
2071         }
2072         if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
2073                 iavf_set_rss_lut(adapter);
2074                 return 0;
2075         }
2076
2077         if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
2078                 iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
2079                                        FLAG_VF_MULTICAST_PROMISC);
2080                 return 0;
2081         }
2082
2083         if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
2084                 iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
2085                 return 0;
2086         }
2087         if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
2088             (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
2089                 iavf_set_promiscuous(adapter, 0);
2090                 return 0;
2091         }
2092
2093         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
2094                 iavf_enable_channels(adapter);
2095                 return 0;
2096         }
2097
2098         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
2099                 iavf_disable_channels(adapter);
2100                 return 0;
2101         }
2102         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
2103                 iavf_add_cloud_filter(adapter);
2104                 return 0;
2105         }
2106
2107         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
2108                 iavf_del_cloud_filter(adapter);
2109                 return 0;
2110         }
2111         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
2112                 iavf_del_cloud_filter(adapter);
2113                 return 0;
2114         }
2115         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
2116                 iavf_add_cloud_filter(adapter);
2117                 return 0;
2118         }
2119         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
2120                 iavf_add_fdir_filter(adapter);
2121                 return IAVF_SUCCESS;
2122         }
2123         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
2124                 iavf_del_fdir_filter(adapter);
2125                 return IAVF_SUCCESS;
2126         }
2127         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
2128                 iavf_add_adv_rss_cfg(adapter);
2129                 return 0;
2130         }
2131         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
2132                 iavf_del_adv_rss_cfg(adapter);
2133                 return 0;
2134         }
2135         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
2136                 iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2137                 return 0;
2138         }
2139         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
2140                 iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2141                 return 0;
2142         }
2143         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
2144                 iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2145                 return 0;
2146         }
2147         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
2148                 iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2149                 return 0;
2150         }
2151         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
2152                 iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2153                 return 0;
2154         }
2155         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
2156                 iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2157                 return 0;
2158         }
2159         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
2160                 iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2161                 return 0;
2162         }
2163         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
2164                 iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2165                 return 0;
2166         }
2167
2168         if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
2169                 iavf_request_stats(adapter);
2170                 return 0;
2171         }
2172
2173         return -EAGAIN;
2174 }
2175
2176 /**
2177  * iavf_set_vlan_offload_features - set VLAN offload configuration
2178  * @adapter: board private structure
2179  * @prev_features: previous features used for comparison
2180  * @features: updated features used for configuration
2181  *
2182  * Set the aq_required bit(s) based on the requested features passed in to
2183  * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
2184  * the watchdog if any changes are requested to expedite the request via
2185  * virtchnl.
2186  **/
2187 void
2188 iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
2189                                netdev_features_t prev_features,
2190                                netdev_features_t features)
2191 {
2192         bool enable_stripping = true, enable_insertion = true;
2193         u16 vlan_ethertype = 0;
2194         u64 aq_required = 0;
2195
2196         /* keep cases separate because one ethertype for offloads can be
2197          * disabled at the same time as another is disabled, so check for an
2198          * enabled ethertype first, then check for disabled. Default to
2199          * ETH_P_8021Q so an ethertype is specified if disabling insertion and
2200          * stripping.
2201          */
2202         if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2203                 vlan_ethertype = ETH_P_8021AD;
2204         else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2205                 vlan_ethertype = ETH_P_8021Q;
2206         else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2207                 vlan_ethertype = ETH_P_8021AD;
2208         else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2209                 vlan_ethertype = ETH_P_8021Q;
2210         else
2211                 vlan_ethertype = ETH_P_8021Q;
2212
2213         if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
2214                 enable_stripping = false;
2215         if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
2216                 enable_insertion = false;
2217
2218         if (VLAN_ALLOWED(adapter)) {
2219                 /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
2220                  * stripping via virtchnl. VLAN insertion can be toggled on the
2221                  * netdev, but it doesn't require a virtchnl message
2222                  */
2223                 if (enable_stripping)
2224                         aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
2225                 else
2226                         aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
2227
2228         } else if (VLAN_V2_ALLOWED(adapter)) {
2229                 switch (vlan_ethertype) {
2230                 case ETH_P_8021Q:
2231                         if (enable_stripping)
2232                                 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
2233                         else
2234                                 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
2235
2236                         if (enable_insertion)
2237                                 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
2238                         else
2239                                 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
2240                         break;
2241                 case ETH_P_8021AD:
2242                         if (enable_stripping)
2243                                 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
2244                         else
2245                                 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
2246
2247                         if (enable_insertion)
2248                                 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
2249                         else
2250                                 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
2251                         break;
2252                 }
2253         }
2254
2255         if (aq_required) {
2256                 adapter->aq_required |= aq_required;
2257                 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
2258         }
2259 }
2260
2261 /**
2262  * iavf_startup - first step of driver startup
2263  * @adapter: board private structure
2264  *
2265  * Function process __IAVF_STARTUP driver state.
2266  * When success the state is changed to __IAVF_INIT_VERSION_CHECK
2267  * when fails the state is changed to __IAVF_INIT_FAILED
2268  **/
2269 static void iavf_startup(struct iavf_adapter *adapter)
2270 {
2271         struct pci_dev *pdev = adapter->pdev;
2272         struct iavf_hw *hw = &adapter->hw;
2273         enum iavf_status status;
2274         int ret;
2275
2276         WARN_ON(adapter->state != __IAVF_STARTUP);
2277
2278         /* driver loaded, probe complete */
2279         adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2280         adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2281         status = iavf_set_mac_type(hw);
2282         if (status) {
2283                 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
2284                 goto err;
2285         }
2286
2287         ret = iavf_check_reset_complete(hw);
2288         if (ret) {
2289                 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
2290                          ret);
2291                 goto err;
2292         }
2293         hw->aq.num_arq_entries = IAVF_AQ_LEN;
2294         hw->aq.num_asq_entries = IAVF_AQ_LEN;
2295         hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2296         hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2297
2298         status = iavf_init_adminq(hw);
2299         if (status) {
2300                 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
2301                         status);
2302                 goto err;
2303         }
2304         ret = iavf_send_api_ver(adapter);
2305         if (ret) {
2306                 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
2307                 iavf_shutdown_adminq(hw);
2308                 goto err;
2309         }
2310         iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
2311         return;
2312 err:
2313         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2314 }
2315
2316 /**
2317  * iavf_init_version_check - second step of driver startup
2318  * @adapter: board private structure
2319  *
2320  * Function process __IAVF_INIT_VERSION_CHECK driver state.
2321  * When success the state is changed to __IAVF_INIT_GET_RESOURCES
2322  * when fails the state is changed to __IAVF_INIT_FAILED
2323  **/
2324 static void iavf_init_version_check(struct iavf_adapter *adapter)
2325 {
2326         struct pci_dev *pdev = adapter->pdev;
2327         struct iavf_hw *hw = &adapter->hw;
2328         int err = -EAGAIN;
2329
2330         WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
2331
2332         if (!iavf_asq_done(hw)) {
2333                 dev_err(&pdev->dev, "Admin queue command never completed\n");
2334                 iavf_shutdown_adminq(hw);
2335                 iavf_change_state(adapter, __IAVF_STARTUP);
2336                 goto err;
2337         }
2338
2339         /* aq msg sent, awaiting reply */
2340         err = iavf_verify_api_ver(adapter);
2341         if (err) {
2342                 if (err == -EALREADY)
2343                         err = iavf_send_api_ver(adapter);
2344                 else
2345                         dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
2346                                 adapter->pf_version.major,
2347                                 adapter->pf_version.minor,
2348                                 VIRTCHNL_VERSION_MAJOR,
2349                                 VIRTCHNL_VERSION_MINOR);
2350                 goto err;
2351         }
2352         err = iavf_send_vf_config_msg(adapter);
2353         if (err) {
2354                 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
2355                         err);
2356                 goto err;
2357         }
2358         iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
2359         return;
2360 err:
2361         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2362 }
2363
2364 /**
2365  * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
2366  * @adapter: board private structure
2367  */
2368 int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
2369 {
2370         int i, num_req_queues = adapter->num_req_queues;
2371         struct iavf_vsi *vsi = &adapter->vsi;
2372
2373         for (i = 0; i < adapter->vf_res->num_vsis; i++) {
2374                 if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
2375                         adapter->vsi_res = &adapter->vf_res->vsi_res[i];
2376         }
2377         if (!adapter->vsi_res) {
2378                 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
2379                 return -ENODEV;
2380         }
2381
2382         if (num_req_queues &&
2383             num_req_queues > adapter->vsi_res->num_queue_pairs) {
2384                 /* Problem.  The PF gave us fewer queues than what we had
2385                  * negotiated in our request.  Need a reset to see if we can't
2386                  * get back to a working state.
2387                  */
2388                 dev_err(&adapter->pdev->dev,
2389                         "Requested %d queues, but PF only gave us %d.\n",
2390                         num_req_queues,
2391                         adapter->vsi_res->num_queue_pairs);
2392                 adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
2393                 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
2394                 iavf_schedule_reset(adapter);
2395
2396                 return -EAGAIN;
2397         }
2398         adapter->num_req_queues = 0;
2399         adapter->vsi.id = adapter->vsi_res->vsi_id;
2400
2401         adapter->vsi.back = adapter;
2402         adapter->vsi.base_vector = 1;
2403         vsi->netdev = adapter->netdev;
2404         vsi->qs_handle = adapter->vsi_res->qset_handle;
2405         if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2406                 adapter->rss_key_size = adapter->vf_res->rss_key_size;
2407                 adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
2408         } else {
2409                 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
2410                 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
2411         }
2412
2413         return 0;
2414 }
2415
2416 /**
2417  * iavf_init_get_resources - third step of driver startup
2418  * @adapter: board private structure
2419  *
2420  * Function process __IAVF_INIT_GET_RESOURCES driver state and
2421  * finishes driver initialization procedure.
2422  * When success the state is changed to __IAVF_DOWN
2423  * when fails the state is changed to __IAVF_INIT_FAILED
2424  **/
2425 static void iavf_init_get_resources(struct iavf_adapter *adapter)
2426 {
2427         struct pci_dev *pdev = adapter->pdev;
2428         struct iavf_hw *hw = &adapter->hw;
2429         int err;
2430
2431         WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
2432         /* aq msg sent, awaiting reply */
2433         if (!adapter->vf_res) {
2434                 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
2435                                           GFP_KERNEL);
2436                 if (!adapter->vf_res) {
2437                         err = -ENOMEM;
2438                         goto err;
2439                 }
2440         }
2441         err = iavf_get_vf_config(adapter);
2442         if (err == -EALREADY) {
2443                 err = iavf_send_vf_config_msg(adapter);
2444                 goto err;
2445         } else if (err == -EINVAL) {
2446                 /* We only get -EINVAL if the device is in a very bad
2447                  * state or if we've been disabled for previous bad
2448                  * behavior. Either way, we're done now.
2449                  */
2450                 iavf_shutdown_adminq(hw);
2451                 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
2452                 return;
2453         }
2454         if (err) {
2455                 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
2456                 goto err_alloc;
2457         }
2458
2459         err = iavf_parse_vf_resource_msg(adapter);
2460         if (err) {
2461                 dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
2462                         err);
2463                 goto err_alloc;
2464         }
2465         /* Some features require additional messages to negotiate extended
2466          * capabilities. These are processed in sequence by the
2467          * __IAVF_INIT_EXTENDED_CAPS driver state.
2468          */
2469         adapter->extended_caps = IAVF_EXTENDED_CAPS;
2470
2471         iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
2472         return;
2473
2474 err_alloc:
2475         kfree(adapter->vf_res);
2476         adapter->vf_res = NULL;
2477 err:
2478         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2479 }
2480
2481 /**
2482  * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2483  * @adapter: board private structure
2484  *
2485  * Function processes send of the extended VLAN V2 capability message to the
2486  * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
2487  * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2488  */
2489 static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2490 {
2491         int ret;
2492
2493         WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
2494
2495         ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
2496         if (ret && ret == -EOPNOTSUPP) {
2497                 /* PF does not support VIRTCHNL_VF_OFFLOAD_V2. In this case,
2498                  * we did not send the capability exchange message and do not
2499                  * expect a response.
2500                  */
2501                 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2502         }
2503
2504         /* We sent the message, so move on to the next step */
2505         adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2506 }
2507
2508 /**
2509  * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2510  * @adapter: board private structure
2511  *
2512  * Function processes receipt of the extended VLAN V2 capability message from
2513  * the PF.
2514  **/
2515 static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2516 {
2517         int ret;
2518
2519         WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
2520
2521         memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
2522
2523         ret = iavf_get_vf_vlan_v2_caps(adapter);
2524         if (ret)
2525                 goto err;
2526
2527         /* We've processed receipt of the VLAN V2 caps message */
2528         adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2529         return;
2530 err:
2531         /* We didn't receive a reply. Make sure we try sending again when
2532          * __IAVF_INIT_FAILED attempts to recover.
2533          */
2534         adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2535         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2536 }
2537
2538 /**
2539  * iavf_init_process_extended_caps - Part of driver startup
2540  * @adapter: board private structure
2541  *
2542  * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
2543  * handles negotiating capabilities for features which require an additional
2544  * message.
2545  *
2546  * Once all extended capabilities exchanges are finished, the driver will
2547  * transition into __IAVF_INIT_CONFIG_ADAPTER.
2548  */
2549 static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
2550 {
2551         WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);
2552
2553         /* Process capability exchange for VLAN V2 */
2554         if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
2555                 iavf_init_send_offload_vlan_v2_caps(adapter);
2556                 return;
2557         } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
2558                 iavf_init_recv_offload_vlan_v2_caps(adapter);
2559                 return;
2560         }
2561
2562         /* When we reach here, no further extended capabilities exchanges are
2563          * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
2564          */
2565         iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
2566 }
2567
2568 /**
2569  * iavf_init_config_adapter - last part of driver startup
2570  * @adapter: board private structure
2571  *
2572  * After all the supported capabilities are negotiated, then the
2573  * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
2574  */
2575 static void iavf_init_config_adapter(struct iavf_adapter *adapter)
2576 {
2577         struct net_device *netdev = adapter->netdev;
2578         struct pci_dev *pdev = adapter->pdev;
2579         int err;
2580
2581         WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);
2582
2583         if (iavf_process_config(adapter))
2584                 goto err;
2585
2586         adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2587
2588         adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
2589
2590         netdev->netdev_ops = &iavf_netdev_ops;
2591         iavf_set_ethtool_ops(netdev);
2592         netdev->watchdog_timeo = 5 * HZ;
2593
2594         /* MTU range: 68 - 9710 */
2595         netdev->min_mtu = ETH_MIN_MTU;
2596         netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
2597
2598         if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
2599                 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
2600                          adapter->hw.mac.addr);
2601                 eth_hw_addr_random(netdev);
2602                 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2603         } else {
2604                 eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2605                 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2606         }
2607
2608         adapter->flags |= IAVF_FLAG_INITIAL_MAC_SET;
2609
2610         adapter->tx_desc_count = IAVF_DEFAULT_TXD;
2611         adapter->rx_desc_count = IAVF_DEFAULT_RXD;
2612         err = iavf_init_interrupt_scheme(adapter);
2613         if (err)
2614                 goto err_sw_init;
2615         iavf_map_rings_to_vectors(adapter);
2616         if (adapter->vf_res->vf_cap_flags &
2617                 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2618                 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
2619
2620         err = iavf_request_misc_irq(adapter);
2621         if (err)
2622                 goto err_sw_init;
2623
2624         netif_carrier_off(netdev);
2625         adapter->link_up = false;
2626
2627         /* set the semaphore to prevent any callbacks after device registration
2628          * up to time when state of driver will be set to __IAVF_DOWN
2629          */
2630         rtnl_lock();
2631         if (!adapter->netdev_registered) {
2632                 err = register_netdevice(netdev);
2633                 if (err) {
2634                         rtnl_unlock();
2635                         goto err_register;
2636                 }
2637         }
2638
2639         adapter->netdev_registered = true;
2640
2641         netif_tx_stop_all_queues(netdev);
2642         if (CLIENT_ALLOWED(adapter)) {
2643                 err = iavf_lan_add_device(adapter);
2644                 if (err)
2645                         dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
2646                                  err);
2647         }
2648         dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2649         if (netdev->features & NETIF_F_GRO)
2650                 dev_info(&pdev->dev, "GRO is enabled\n");
2651
2652         iavf_change_state(adapter, __IAVF_DOWN);
2653         set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2654         rtnl_unlock();
2655
2656         iavf_misc_irq_enable(adapter);
2657         wake_up(&adapter->down_waitqueue);
2658
2659         adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
2660         adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
2661         if (!adapter->rss_key || !adapter->rss_lut) {
2662                 err = -ENOMEM;
2663                 goto err_mem;
2664         }
2665         if (RSS_AQ(adapter))
2666                 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2667         else
2668                 iavf_init_rss(adapter);
2669
2670         if (VLAN_V2_ALLOWED(adapter))
2671                 /* request initial VLAN offload settings */
2672                 iavf_set_vlan_offload_features(adapter, 0, netdev->features);
2673
2674         return;
2675 err_mem:
2676         iavf_free_rss(adapter);
2677 err_register:
2678         iavf_free_misc_irq(adapter);
2679 err_sw_init:
2680         iavf_reset_interrupt_capability(adapter);
2681 err:
2682         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2683 }
2684
2685 /**
2686  * iavf_watchdog_task - Periodic call-back task
2687  * @work: pointer to work_struct
2688  **/
2689 static void iavf_watchdog_task(struct work_struct *work)
2690 {
2691         struct iavf_adapter *adapter = container_of(work,
2692                                                     struct iavf_adapter,
2693                                                     watchdog_task.work);
2694         struct iavf_hw *hw = &adapter->hw;
2695         u32 reg_val;
2696
2697         if (!mutex_trylock(&adapter->crit_lock)) {
2698                 if (adapter->state == __IAVF_REMOVE)
2699                         return;
2700
2701                 goto restart_watchdog;
2702         }
2703
2704         if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2705                 iavf_change_state(adapter, __IAVF_COMM_FAILED);
2706
2707         if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2708                 adapter->aq_required = 0;
2709                 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2710                 mutex_unlock(&adapter->crit_lock);
2711                 queue_work(iavf_wq, &adapter->reset_task);
2712                 return;
2713         }
2714
2715         switch (adapter->state) {
2716         case __IAVF_STARTUP:
2717                 iavf_startup(adapter);
2718                 mutex_unlock(&adapter->crit_lock);
2719                 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2720                                    msecs_to_jiffies(30));
2721                 return;
2722         case __IAVF_INIT_VERSION_CHECK:
2723                 iavf_init_version_check(adapter);
2724                 mutex_unlock(&adapter->crit_lock);
2725                 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2726                                    msecs_to_jiffies(30));
2727                 return;
2728         case __IAVF_INIT_GET_RESOURCES:
2729                 iavf_init_get_resources(adapter);
2730                 mutex_unlock(&adapter->crit_lock);
2731                 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2732                                    msecs_to_jiffies(1));
2733                 return;
2734         case __IAVF_INIT_EXTENDED_CAPS:
2735                 iavf_init_process_extended_caps(adapter);
2736                 mutex_unlock(&adapter->crit_lock);
2737                 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2738                                    msecs_to_jiffies(1));
2739                 return;
2740         case __IAVF_INIT_CONFIG_ADAPTER:
2741                 iavf_init_config_adapter(adapter);
2742                 mutex_unlock(&adapter->crit_lock);
2743                 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2744                                    msecs_to_jiffies(1));
2745                 return;
2746         case __IAVF_INIT_FAILED:
2747                 if (test_bit(__IAVF_IN_REMOVE_TASK,
2748                              &adapter->crit_section)) {
2749                         /* Do not update the state and do not reschedule
2750                          * watchdog task, iavf_remove should handle this state
2751                          * as it can loop forever
2752                          */
2753                         mutex_unlock(&adapter->crit_lock);
2754                         return;
2755                 }
2756                 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
2757                         dev_err(&adapter->pdev->dev,
2758                                 "Failed to communicate with PF; waiting before retry\n");
2759                         adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2760                         iavf_shutdown_adminq(hw);
2761                         mutex_unlock(&adapter->crit_lock);
2762                         queue_delayed_work(iavf_wq,
2763                                            &adapter->watchdog_task, (5 * HZ));
2764                         return;
2765                 }
2766                 /* Try again from failed step*/
2767                 iavf_change_state(adapter, adapter->last_state);
2768                 mutex_unlock(&adapter->crit_lock);
2769                 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
2770                 return;
2771         case __IAVF_COMM_FAILED:
2772                 if (test_bit(__IAVF_IN_REMOVE_TASK,
2773                              &adapter->crit_section)) {
2774                         /* Set state to __IAVF_INIT_FAILED and perform remove
2775                          * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
2776                          * doesn't bring the state back to __IAVF_COMM_FAILED.
2777                          */
2778                         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2779                         adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2780                         mutex_unlock(&adapter->crit_lock);
2781                         return;
2782                 }
2783                 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2784                           IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2785                 if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
2786                     reg_val == VIRTCHNL_VFR_COMPLETED) {
2787                         /* A chance for redemption! */
2788                         dev_err(&adapter->pdev->dev,
2789                                 "Hardware came out of reset. Attempting reinit.\n");
2790                         /* When init task contacts the PF and
2791                          * gets everything set up again, it'll restart the
2792                          * watchdog for us. Down, boy. Sit. Stay. Woof.
2793                          */
2794                         iavf_change_state(adapter, __IAVF_STARTUP);
2795                         adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2796                 }
2797                 adapter->aq_required = 0;
2798                 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2799                 mutex_unlock(&adapter->crit_lock);
2800                 queue_delayed_work(iavf_wq,
2801                                    &adapter->watchdog_task,
2802                                    msecs_to_jiffies(10));
2803                 return;
2804         case __IAVF_RESETTING:
2805                 mutex_unlock(&adapter->crit_lock);
2806                 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
2807                 return;
2808         case __IAVF_DOWN:
2809         case __IAVF_DOWN_PENDING:
2810         case __IAVF_TESTING:
2811         case __IAVF_RUNNING:
2812                 if (adapter->current_op) {
2813                         if (!iavf_asq_done(hw)) {
2814                                 dev_dbg(&adapter->pdev->dev,
2815                                         "Admin queue timeout\n");
2816                                 iavf_send_api_ver(adapter);
2817                         }
2818                 } else {
2819                         int ret = iavf_process_aq_command(adapter);
2820
2821                         /* An error will be returned if no commands were
2822                          * processed; use this opportunity to update stats
2823                          * if the error isn't -ENOTSUPP
2824                          */
2825                         if (ret && ret != -EOPNOTSUPP &&
2826                             adapter->state == __IAVF_RUNNING)
2827                                 iavf_request_stats(adapter);
2828                 }
2829                 if (adapter->state == __IAVF_RUNNING)
2830                         iavf_detect_recover_hung(&adapter->vsi);
2831                 break;
2832         case __IAVF_REMOVE:
2833         default:
2834                 mutex_unlock(&adapter->crit_lock);
2835                 return;
2836         }
2837
2838         /* check for hw reset */
2839         reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2840         if (!reg_val) {
2841                 adapter->flags |= IAVF_FLAG_RESET_PENDING;
2842                 adapter->aq_required = 0;
2843                 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2844                 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
2845                 queue_work(iavf_wq, &adapter->reset_task);
2846                 mutex_unlock(&adapter->crit_lock);
2847                 queue_delayed_work(iavf_wq,
2848                                    &adapter->watchdog_task, HZ * 2);
2849                 return;
2850         }
2851
2852         schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
2853         mutex_unlock(&adapter->crit_lock);
2854 restart_watchdog:
2855         if (adapter->state >= __IAVF_DOWN)
2856                 queue_work(iavf_wq, &adapter->adminq_task);
2857         if (adapter->aq_required)
2858                 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2859                                    msecs_to_jiffies(20));
2860         else
2861                 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
2862 }
2863
2864 /**
2865  * iavf_disable_vf - disable VF
2866  * @adapter: board private structure
2867  *
2868  * Set communication failed flag and free all resources.
2869  * NOTE: This function is expected to be called with crit_lock being held.
2870  **/
2871 static void iavf_disable_vf(struct iavf_adapter *adapter)
2872 {
2873         struct iavf_mac_filter *f, *ftmp;
2874         struct iavf_vlan_filter *fv, *fvtmp;
2875         struct iavf_cloud_filter *cf, *cftmp;
2876
2877         adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2878
2879         /* We don't use netif_running() because it may be true prior to
2880          * ndo_open() returning, so we can't assume it means all our open
2881          * tasks have finished, since we're not holding the rtnl_lock here.
2882          */
2883         if (adapter->state == __IAVF_RUNNING) {
2884                 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2885                 netif_carrier_off(adapter->netdev);
2886                 netif_tx_disable(adapter->netdev);
2887                 adapter->link_up = false;
2888                 iavf_napi_disable_all(adapter);
2889                 iavf_irq_disable(adapter);
2890                 iavf_free_traffic_irqs(adapter);
2891                 iavf_free_all_tx_resources(adapter);
2892                 iavf_free_all_rx_resources(adapter);
2893         }
2894
2895         spin_lock_bh(&adapter->mac_vlan_list_lock);
2896
2897         /* Delete all of the filters */
2898         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2899                 list_del(&f->list);
2900                 kfree(f);
2901         }
2902
2903         list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
2904                 list_del(&fv->list);
2905                 kfree(fv);
2906         }
2907
2908         spin_unlock_bh(&adapter->mac_vlan_list_lock);
2909
2910         spin_lock_bh(&adapter->cloud_filter_list_lock);
2911         list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
2912                 list_del(&cf->list);
2913                 kfree(cf);
2914                 adapter->num_cloud_filters--;
2915         }
2916         spin_unlock_bh(&adapter->cloud_filter_list_lock);
2917
2918         iavf_free_misc_irq(adapter);
2919         iavf_reset_interrupt_capability(adapter);
2920         iavf_free_q_vectors(adapter);
2921         iavf_free_queues(adapter);
2922         memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
2923         iavf_shutdown_adminq(&adapter->hw);
2924         adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2925         iavf_change_state(adapter, __IAVF_DOWN);
2926         wake_up(&adapter->down_waitqueue);
2927         dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
2928 }
2929
2930 /**
2931  * iavf_reset_task - Call-back task to handle hardware reset
2932  * @work: pointer to work_struct
2933  *
2934  * During reset we need to shut down and reinitialize the admin queue
2935  * before we can use it to communicate with the PF again. We also clear
2936  * and reinit the rings because that context is lost as well.
2937  **/
2938 static void iavf_reset_task(struct work_struct *work)
2939 {
2940         struct iavf_adapter *adapter = container_of(work,
2941                                                       struct iavf_adapter,
2942                                                       reset_task);
2943         struct virtchnl_vf_resource *vfres = adapter->vf_res;
2944         struct net_device *netdev = adapter->netdev;
2945         struct iavf_hw *hw = &adapter->hw;
2946         struct iavf_mac_filter *f, *ftmp;
2947         struct iavf_cloud_filter *cf;
2948         enum iavf_status status;
2949         u32 reg_val;
2950         int i = 0, err;
2951         bool running;
2952
2953         /* Detach interface to avoid subsequent NDO callbacks */
2954         rtnl_lock();
2955         netif_device_detach(netdev);
2956         rtnl_unlock();
2957
2958         /* When device is being removed it doesn't make sense to run the reset
2959          * task, just return in such a case.
2960          */
2961         if (!mutex_trylock(&adapter->crit_lock)) {
2962                 if (adapter->state != __IAVF_REMOVE)
2963                         queue_work(iavf_wq, &adapter->reset_task);
2964
2965                 goto reset_finish;
2966         }
2967
2968         while (!mutex_trylock(&adapter->client_lock))
2969                 usleep_range(500, 1000);
2970         if (CLIENT_ENABLED(adapter)) {
2971                 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
2972                                     IAVF_FLAG_CLIENT_NEEDS_CLOSE |
2973                                     IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
2974                                     IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
2975                 cancel_delayed_work_sync(&adapter->client_task);
2976                 iavf_notify_client_close(&adapter->vsi, true);
2977         }
2978         iavf_misc_irq_disable(adapter);
2979         if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2980                 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
2981                 /* Restart the AQ here. If we have been reset but didn't
2982                  * detect it, or if the PF had to reinit, our AQ will be hosed.
2983                  */
2984                 iavf_shutdown_adminq(hw);
2985                 iavf_init_adminq(hw);
2986                 iavf_request_reset(adapter);
2987         }
2988         adapter->flags |= IAVF_FLAG_RESET_PENDING;
2989
2990         /* poll until we see the reset actually happen */
2991         for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
2992                 reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
2993                           IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2994                 if (!reg_val)
2995                         break;
2996                 usleep_range(5000, 10000);
2997         }
2998         if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
2999                 dev_info(&adapter->pdev->dev, "Never saw reset\n");
3000                 goto continue_reset; /* act like the reset happened */
3001         }
3002
3003         /* wait until the reset is complete and the PF is responding to us */
3004         for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
3005                 /* sleep first to make sure a minimum wait time is met */
3006                 msleep(IAVF_RESET_WAIT_MS);
3007
3008                 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
3009                           IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3010                 if (reg_val == VIRTCHNL_VFR_VFACTIVE)
3011                         break;
3012         }
3013
3014         pci_set_master(adapter->pdev);
3015         pci_restore_msi_state(adapter->pdev);
3016
3017         if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
3018                 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
3019                         reg_val);
3020                 iavf_disable_vf(adapter);
3021                 mutex_unlock(&adapter->client_lock);
3022                 mutex_unlock(&adapter->crit_lock);
3023                 if (netif_running(netdev)) {
3024                         rtnl_lock();
3025                         dev_close(netdev);
3026                         rtnl_unlock();
3027                 }
3028                 return; /* Do not attempt to reinit. It's dead, Jim. */
3029         }
3030
3031 continue_reset:
3032         /* We don't use netif_running() because it may be true prior to
3033          * ndo_open() returning, so we can't assume it means all our open
3034          * tasks have finished, since we're not holding the rtnl_lock here.
3035          */
3036         running = adapter->state == __IAVF_RUNNING;
3037
3038         if (running) {
3039                 netif_carrier_off(netdev);
3040                 netif_tx_stop_all_queues(netdev);
3041                 adapter->link_up = false;
3042                 iavf_napi_disable_all(adapter);
3043         }
3044         iavf_irq_disable(adapter);
3045
3046         iavf_change_state(adapter, __IAVF_RESETTING);
3047         adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
3048
3049         /* free the Tx/Rx rings and descriptors, might be better to just
3050          * re-use them sometime in the future
3051          */
3052         iavf_free_all_rx_resources(adapter);
3053         iavf_free_all_tx_resources(adapter);
3054
3055         adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
3056         /* kill and reinit the admin queue */
3057         iavf_shutdown_adminq(hw);
3058         adapter->current_op = VIRTCHNL_OP_UNKNOWN;
3059         status = iavf_init_adminq(hw);
3060         if (status) {
3061                 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
3062                          status);
3063                 goto reset_err;
3064         }
3065         adapter->aq_required = 0;
3066
3067         if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3068             (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3069                 err = iavf_reinit_interrupt_scheme(adapter);
3070                 if (err)
3071                         goto reset_err;
3072         }
3073
3074         if (RSS_AQ(adapter)) {
3075                 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
3076         } else {
3077                 err = iavf_init_rss(adapter);
3078                 if (err)
3079                         goto reset_err;
3080         }
3081
3082         adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
3083         /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
3084          * sent/received yet, so VLAN_V2_ALLOWED() cannot is not reliable here,
3085          * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
3086          * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
3087          * been successfully sent and negotiated
3088          */
3089         adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
3090         adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
3091
3092         spin_lock_bh(&adapter->mac_vlan_list_lock);
3093
3094         /* Delete filter for the current MAC address, it could have
3095          * been changed by the PF via administratively set MAC.
3096          * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
3097          */
3098         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3099                 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
3100                         list_del(&f->list);
3101                         kfree(f);
3102                 }
3103         }
3104         /* re-add all MAC filters */
3105         list_for_each_entry(f, &adapter->mac_filter_list, list) {
3106                 f->add = true;
3107         }
3108         spin_unlock_bh(&adapter->mac_vlan_list_lock);
3109
3110         /* check if TCs are running and re-add all cloud filters */
3111         spin_lock_bh(&adapter->cloud_filter_list_lock);
3112         if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
3113             adapter->num_tc) {
3114                 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
3115                         cf->add = true;
3116                 }
3117         }
3118         spin_unlock_bh(&adapter->cloud_filter_list_lock);
3119
3120         adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
3121         adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3122         iavf_misc_irq_enable(adapter);
3123
3124         bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
3125         bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
3126
3127         mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
3128
3129         /* We were running when the reset started, so we need to restore some
3130          * state here.
3131          */
3132         if (running) {
3133                 /* allocate transmit descriptors */
3134                 err = iavf_setup_all_tx_resources(adapter);
3135                 if (err)
3136                         goto reset_err;
3137
3138                 /* allocate receive descriptors */
3139                 err = iavf_setup_all_rx_resources(adapter);
3140                 if (err)
3141                         goto reset_err;
3142
3143                 if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3144                     (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3145                         err = iavf_request_traffic_irqs(adapter, netdev->name);
3146                         if (err)
3147                                 goto reset_err;
3148
3149                         adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
3150                 }
3151
3152                 iavf_configure(adapter);
3153
3154                 /* iavf_up_complete() will switch device back
3155                  * to __IAVF_RUNNING
3156                  */
3157                 iavf_up_complete(adapter);
3158
3159                 iavf_irq_enable(adapter, true);
3160         } else {
3161                 iavf_change_state(adapter, __IAVF_DOWN);
3162                 wake_up(&adapter->down_waitqueue);
3163         }
3164
3165         adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3166
3167         mutex_unlock(&adapter->client_lock);
3168         mutex_unlock(&adapter->crit_lock);
3169
3170         goto reset_finish;
3171 reset_err:
3172         if (running) {
3173                 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3174                 iavf_free_traffic_irqs(adapter);
3175         }
3176         iavf_disable_vf(adapter);
3177
3178         mutex_unlock(&adapter->client_lock);
3179         mutex_unlock(&adapter->crit_lock);
3180
3181         if (netif_running(netdev)) {
3182                 /* Close device to ensure that Tx queues will not be started
3183                  * during netif_device_attach() at the end of the reset task.
3184                  */
3185                 rtnl_lock();
3186                 dev_close(netdev);
3187                 rtnl_unlock();
3188         }
3189
3190         dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
3191 reset_finish:
3192         rtnl_lock();
3193         netif_device_attach(netdev);
3194         rtnl_unlock();
3195 }
3196
3197 /**
3198  * iavf_adminq_task - worker thread to clean the admin queue
3199  * @work: pointer to work_struct containing our data
3200  **/
3201 static void iavf_adminq_task(struct work_struct *work)
3202 {
3203         struct iavf_adapter *adapter =
3204                 container_of(work, struct iavf_adapter, adminq_task);
3205         struct iavf_hw *hw = &adapter->hw;
3206         struct iavf_arq_event_info event;
3207         enum virtchnl_ops v_op;
3208         enum iavf_status ret, v_ret;
3209         u32 val, oldval;
3210         u16 pending;
3211
3212         if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
3213                 goto out;
3214
3215         if (!mutex_trylock(&adapter->crit_lock)) {
3216                 if (adapter->state == __IAVF_REMOVE)
3217                         return;
3218
3219                 queue_work(iavf_wq, &adapter->adminq_task);
3220                 goto out;
3221         }
3222
3223         event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
3224         event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
3225         if (!event.msg_buf)
3226                 goto out;
3227
3228         do {
3229                 ret = iavf_clean_arq_element(hw, &event, &pending);
3230                 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
3231                 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
3232
3233                 if (ret || !v_op)
3234                         break; /* No event to process or error cleaning ARQ */
3235
3236                 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
3237                                          event.msg_len);
3238                 if (pending != 0)
3239                         memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
3240         } while (pending);
3241         mutex_unlock(&adapter->crit_lock);
3242
3243         if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
3244                 if (adapter->netdev_registered ||
3245                     !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
3246                         struct net_device *netdev = adapter->netdev;
3247
3248                         rtnl_lock();
3249                         netdev_update_features(netdev);
3250                         rtnl_unlock();
3251                         /* Request VLAN offload settings */
3252                         if (VLAN_V2_ALLOWED(adapter))
3253                                 iavf_set_vlan_offload_features
3254                                         (adapter, 0, netdev->features);
3255
3256                         iavf_set_queue_vlan_tag_loc(adapter);
3257                 }
3258
3259                 adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
3260         }
3261         if ((adapter->flags &
3262              (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
3263             adapter->state == __IAVF_RESETTING)
3264                 goto freedom;
3265
3266         /* check for error indications */
3267         val = rd32(hw, hw->aq.arq.len);
3268         if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
3269                 goto freedom;
3270         oldval = val;
3271         if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
3272                 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
3273                 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
3274         }
3275         if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
3276                 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
3277                 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
3278         }
3279         if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
3280                 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
3281                 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
3282         }
3283         if (oldval != val)
3284                 wr32(hw, hw->aq.arq.len, val);
3285
3286         val = rd32(hw, hw->aq.asq.len);
3287         oldval = val;
3288         if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
3289                 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
3290                 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
3291         }
3292         if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
3293                 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
3294                 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
3295         }
3296         if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
3297                 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
3298                 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
3299         }
3300         if (oldval != val)
3301                 wr32(hw, hw->aq.asq.len, val);
3302
3303 freedom:
3304         kfree(event.msg_buf);
3305 out:
3306         /* re-enable Admin queue interrupt cause */
3307         iavf_misc_irq_enable(adapter);
3308 }
3309
3310 /**
3311  * iavf_client_task - worker thread to perform client work
3312  * @work: pointer to work_struct containing our data
3313  *
3314  * This task handles client interactions. Because client calls can be
3315  * reentrant, we can't handle them in the watchdog.
3316  **/
3317 static void iavf_client_task(struct work_struct *work)
3318 {
3319         struct iavf_adapter *adapter =
3320                 container_of(work, struct iavf_adapter, client_task.work);
3321
3322         /* If we can't get the client bit, just give up. We'll be rescheduled
3323          * later.
3324          */
3325
3326         if (!mutex_trylock(&adapter->client_lock))
3327                 return;
3328
3329         if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
3330                 iavf_client_subtask(adapter);
3331                 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3332                 goto out;
3333         }
3334         if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
3335                 iavf_notify_client_l2_params(&adapter->vsi);
3336                 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
3337                 goto out;
3338         }
3339         if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
3340                 iavf_notify_client_close(&adapter->vsi, false);
3341                 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
3342                 goto out;
3343         }
3344         if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
3345                 iavf_notify_client_open(&adapter->vsi);
3346                 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
3347         }
3348 out:
3349         mutex_unlock(&adapter->client_lock);
3350 }
3351
3352 /**
3353  * iavf_free_all_tx_resources - Free Tx Resources for All Queues
3354  * @adapter: board private structure
3355  *
3356  * Free all transmit software resources
3357  **/
3358 void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
3359 {
3360         int i;
3361
3362         if (!adapter->tx_rings)
3363                 return;
3364
3365         for (i = 0; i < adapter->num_active_queues; i++)
3366                 if (adapter->tx_rings[i].desc)
3367                         iavf_free_tx_resources(&adapter->tx_rings[i]);
3368 }
3369
3370 /**
3371  * iavf_setup_all_tx_resources - allocate all queues Tx resources
3372  * @adapter: board private structure
3373  *
3374  * If this function returns with an error, then it's possible one or
3375  * more of the rings is populated (while the rest are not).  It is the
3376  * callers duty to clean those orphaned rings.
3377  *
3378  * Return 0 on success, negative on failure
3379  **/
3380 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
3381 {
3382         int i, err = 0;
3383
3384         for (i = 0; i < adapter->num_active_queues; i++) {
3385                 adapter->tx_rings[i].count = adapter->tx_desc_count;
3386                 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
3387                 if (!err)
3388                         continue;
3389                 dev_err(&adapter->pdev->dev,
3390                         "Allocation for Tx Queue %u failed\n", i);
3391                 break;
3392         }
3393
3394         return err;
3395 }
3396
3397 /**
3398  * iavf_setup_all_rx_resources - allocate all queues Rx resources
3399  * @adapter: board private structure
3400  *
3401  * If this function returns with an error, then it's possible one or
3402  * more of the rings is populated (while the rest are not).  It is the
3403  * callers duty to clean those orphaned rings.
3404  *
3405  * Return 0 on success, negative on failure
3406  **/
3407 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
3408 {
3409         int i, err = 0;
3410
3411         for (i = 0; i < adapter->num_active_queues; i++) {
3412                 adapter->rx_rings[i].count = adapter->rx_desc_count;
3413                 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
3414                 if (!err)
3415                         continue;
3416                 dev_err(&adapter->pdev->dev,
3417                         "Allocation for Rx Queue %u failed\n", i);
3418                 break;
3419         }
3420         return err;
3421 }
3422
3423 /**
3424  * iavf_free_all_rx_resources - Free Rx Resources for All Queues
3425  * @adapter: board private structure
3426  *
3427  * Free all receive software resources
3428  **/
3429 void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
3430 {
3431         int i;
3432
3433         if (!adapter->rx_rings)
3434                 return;
3435
3436         for (i = 0; i < adapter->num_active_queues; i++)
3437                 if (adapter->rx_rings[i].desc)
3438                         iavf_free_rx_resources(&adapter->rx_rings[i]);
3439 }
3440
3441 /**
3442  * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
3443  * @adapter: board private structure
3444  * @max_tx_rate: max Tx bw for a tc
3445  **/
3446 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
3447                                       u64 max_tx_rate)
3448 {
3449         int speed = 0, ret = 0;
3450
3451         if (ADV_LINK_SUPPORT(adapter)) {
3452                 if (adapter->link_speed_mbps < U32_MAX) {
3453                         speed = adapter->link_speed_mbps;
3454                         goto validate_bw;
3455                 } else {
3456                         dev_err(&adapter->pdev->dev, "Unknown link speed\n");
3457                         return -EINVAL;
3458                 }
3459         }
3460
3461         switch (adapter->link_speed) {
3462         case VIRTCHNL_LINK_SPEED_40GB:
3463                 speed = SPEED_40000;
3464                 break;
3465         case VIRTCHNL_LINK_SPEED_25GB:
3466                 speed = SPEED_25000;
3467                 break;
3468         case VIRTCHNL_LINK_SPEED_20GB:
3469                 speed = SPEED_20000;
3470                 break;
3471         case VIRTCHNL_LINK_SPEED_10GB:
3472                 speed = SPEED_10000;
3473                 break;
3474         case VIRTCHNL_LINK_SPEED_5GB:
3475                 speed = SPEED_5000;
3476                 break;
3477         case VIRTCHNL_LINK_SPEED_2_5GB:
3478                 speed = SPEED_2500;
3479                 break;
3480         case VIRTCHNL_LINK_SPEED_1GB:
3481                 speed = SPEED_1000;
3482                 break;
3483         case VIRTCHNL_LINK_SPEED_100MB:
3484                 speed = SPEED_100;
3485                 break;
3486         default:
3487                 break;
3488         }
3489
3490 validate_bw:
3491         if (max_tx_rate > speed) {
3492                 dev_err(&adapter->pdev->dev,
3493                         "Invalid tx rate specified\n");
3494                 ret = -EINVAL;
3495         }
3496
3497         return ret;
3498 }
3499
3500 /**
3501  * iavf_validate_ch_config - validate queue mapping info
3502  * @adapter: board private structure
3503  * @mqprio_qopt: queue parameters
3504  *
3505  * This function validates if the config provided by the user to
3506  * configure queue channels is valid or not. Returns 0 on a valid
3507  * config.
3508  **/
3509 static int iavf_validate_ch_config(struct iavf_adapter *adapter,
3510                                    struct tc_mqprio_qopt_offload *mqprio_qopt)
3511 {
3512         u64 total_max_rate = 0;
3513         u32 tx_rate_rem = 0;
3514         int i, num_qps = 0;
3515         u64 tx_rate = 0;
3516         int ret = 0;
3517
3518         if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
3519             mqprio_qopt->qopt.num_tc < 1)
3520                 return -EINVAL;
3521
3522         for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
3523                 if (!mqprio_qopt->qopt.count[i] ||
3524                     mqprio_qopt->qopt.offset[i] != num_qps)
3525                         return -EINVAL;
3526                 if (mqprio_qopt->min_rate[i]) {
3527                         dev_err(&adapter->pdev->dev,
3528                                 "Invalid min tx rate (greater than 0) specified for TC%d\n",
3529                                 i);
3530                         return -EINVAL;
3531                 }
3532
3533                 /* convert to Mbps */
3534                 tx_rate = div_u64(mqprio_qopt->max_rate[i],
3535                                   IAVF_MBPS_DIVISOR);
3536
3537                 if (mqprio_qopt->max_rate[i] &&
3538                     tx_rate < IAVF_MBPS_QUANTA) {
3539                         dev_err(&adapter->pdev->dev,
3540                                 "Invalid max tx rate for TC%d, minimum %dMbps\n",
3541                                 i, IAVF_MBPS_QUANTA);
3542                         return -EINVAL;
3543                 }
3544
3545                 (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
3546
3547                 if (tx_rate_rem != 0) {
3548                         dev_err(&adapter->pdev->dev,
3549                                 "Invalid max tx rate for TC%d, not divisible by %d\n",
3550                                 i, IAVF_MBPS_QUANTA);
3551                         return -EINVAL;
3552                 }
3553
3554                 total_max_rate += tx_rate;
3555                 num_qps += mqprio_qopt->qopt.count[i];
3556         }
3557         if (num_qps > adapter->num_active_queues) {
3558                 dev_err(&adapter->pdev->dev,
3559                         "Cannot support requested number of queues\n");
3560                 return -EINVAL;
3561         }
3562
3563         ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
3564         return ret;
3565 }
3566
3567 /**
3568  * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
3569  * @adapter: board private structure
3570  **/
3571 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
3572 {
3573         struct iavf_cloud_filter *cf, *cftmp;
3574
3575         spin_lock_bh(&adapter->cloud_filter_list_lock);
3576         list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
3577                                  list) {
3578                 list_del(&cf->list);
3579                 kfree(cf);
3580                 adapter->num_cloud_filters--;
3581         }
3582         spin_unlock_bh(&adapter->cloud_filter_list_lock);
3583 }
3584
3585 /**
3586  * __iavf_setup_tc - configure multiple traffic classes
3587  * @netdev: network interface device structure
3588  * @type_data: tc offload data
3589  *
3590  * This function processes the config information provided by the
3591  * user to configure traffic classes/queue channels and packages the
3592  * information to request the PF to setup traffic classes.
3593  *
3594  * Returns 0 on success.
3595  **/
3596 static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
3597 {
3598         struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
3599         struct iavf_adapter *adapter = netdev_priv(netdev);
3600         struct virtchnl_vf_resource *vfres = adapter->vf_res;
3601         u8 num_tc = 0, total_qps = 0;
3602         int ret = 0, netdev_tc = 0;
3603         u64 max_tx_rate;
3604         u16 mode;
3605         int i;
3606
3607         num_tc = mqprio_qopt->qopt.num_tc;
3608         mode = mqprio_qopt->mode;
3609
3610         /* delete queue_channel */
3611         if (!mqprio_qopt->qopt.hw) {
3612                 if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
3613                         /* reset the tc configuration */
3614                         netdev_reset_tc(netdev);
3615                         adapter->num_tc = 0;
3616                         netif_tx_stop_all_queues(netdev);
3617                         netif_tx_disable(netdev);
3618                         iavf_del_all_cloud_filters(adapter);
3619                         adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
3620                         total_qps = adapter->orig_num_active_queues;
3621                         goto exit;
3622                 } else {
3623                         return -EINVAL;
3624                 }
3625         }
3626
3627         /* add queue channel */
3628         if (mode == TC_MQPRIO_MODE_CHANNEL) {
3629                 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3630                         dev_err(&adapter->pdev->dev, "ADq not supported\n");
3631                         return -EOPNOTSUPP;
3632                 }
3633                 if (adapter->ch_config.state != __IAVF_TC_INVALID) {
3634                         dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
3635                         return -EINVAL;
3636                 }
3637
3638                 ret = iavf_validate_ch_config(adapter, mqprio_qopt);
3639                 if (ret)
3640                         return ret;
3641                 /* Return if same TC config is requested */
3642                 if (adapter->num_tc == num_tc)
3643                         return 0;
3644                 adapter->num_tc = num_tc;
3645
3646                 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3647                         if (i < num_tc) {
3648                                 adapter->ch_config.ch_info[i].count =
3649                                         mqprio_qopt->qopt.count[i];
3650                                 adapter->ch_config.ch_info[i].offset =
3651                                         mqprio_qopt->qopt.offset[i];
3652                                 total_qps += mqprio_qopt->qopt.count[i];
3653                                 max_tx_rate = mqprio_qopt->max_rate[i];
3654                                 /* convert to Mbps */
3655                                 max_tx_rate = div_u64(max_tx_rate,
3656                                                       IAVF_MBPS_DIVISOR);
3657                                 adapter->ch_config.ch_info[i].max_tx_rate =
3658                                         max_tx_rate;
3659                         } else {
3660                                 adapter->ch_config.ch_info[i].count = 1;
3661                                 adapter->ch_config.ch_info[i].offset = 0;
3662                         }
3663                 }
3664
3665                 /* Take snapshot of original config such as "num_active_queues"
3666                  * It is used later when delete ADQ flow is exercised, so that
3667                  * once delete ADQ flow completes, VF shall go back to its
3668                  * original queue configuration
3669                  */
3670
3671                 adapter->orig_num_active_queues = adapter->num_active_queues;
3672
3673                 /* Store queue info based on TC so that VF gets configured
3674                  * with correct number of queues when VF completes ADQ config
3675                  * flow
3676                  */
3677                 adapter->ch_config.total_qps = total_qps;
3678
3679                 netif_tx_stop_all_queues(netdev);
3680                 netif_tx_disable(netdev);
3681                 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
3682                 netdev_reset_tc(netdev);
3683                 /* Report the tc mapping up the stack */
3684                 netdev_set_num_tc(adapter->netdev, num_tc);
3685                 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3686                         u16 qcount = mqprio_qopt->qopt.count[i];
3687                         u16 qoffset = mqprio_qopt->qopt.offset[i];
3688
3689                         if (i < num_tc)
3690                                 netdev_set_tc_queue(netdev, netdev_tc++, qcount,
3691                                                     qoffset);
3692                 }
3693         }
3694 exit:
3695         if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
3696                 return 0;
3697
3698         netif_set_real_num_rx_queues(netdev, total_qps);
3699         netif_set_real_num_tx_queues(netdev, total_qps);
3700
3701         return ret;
3702 }
3703
3704 /**
3705  * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
3706  * @adapter: board private structure
3707  * @f: pointer to struct flow_cls_offload
3708  * @filter: pointer to cloud filter structure
3709  */
3710 static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
3711                                  struct flow_cls_offload *f,
3712                                  struct iavf_cloud_filter *filter)
3713 {
3714         struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3715         struct flow_dissector *dissector = rule->match.dissector;
3716         u16 n_proto_mask = 0;
3717         u16 n_proto_key = 0;
3718         u8 field_flags = 0;
3719         u16 addr_type = 0;
3720         u16 n_proto = 0;
3721         int i = 0;
3722         struct virtchnl_filter *vf = &filter->f;
3723
3724         if (dissector->used_keys &
3725             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
3726               BIT(FLOW_DISSECTOR_KEY_BASIC) |
3727               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
3728               BIT(FLOW_DISSECTOR_KEY_VLAN) |
3729               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
3730               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
3731               BIT(FLOW_DISSECTOR_KEY_PORTS) |
3732               BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
3733                 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
3734                         dissector->used_keys);
3735                 return -EOPNOTSUPP;
3736         }
3737
3738         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
3739                 struct flow_match_enc_keyid match;
3740
3741                 flow_rule_match_enc_keyid(rule, &match);
3742                 if (match.mask->keyid != 0)
3743                         field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
3744         }
3745
3746         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
3747                 struct flow_match_basic match;
3748
3749                 flow_rule_match_basic(rule, &match);
3750                 n_proto_key = ntohs(match.key->n_proto);
3751                 n_proto_mask = ntohs(match.mask->n_proto);
3752
3753                 if (n_proto_key == ETH_P_ALL) {
3754                         n_proto_key = 0;
3755                         n_proto_mask = 0;
3756                 }
3757                 n_proto = n_proto_key & n_proto_mask;
3758                 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
3759                         return -EINVAL;
3760                 if (n_proto == ETH_P_IPV6) {
3761                         /* specify flow type as TCP IPv6 */
3762                         vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
3763                 }
3764
3765                 if (match.key->ip_proto != IPPROTO_TCP) {
3766                         dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
3767                         return -EINVAL;
3768                 }
3769         }
3770
3771         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
3772                 struct flow_match_eth_addrs match;
3773
3774                 flow_rule_match_eth_addrs(rule, &match);
3775
3776                 /* use is_broadcast and is_zero to check for all 0xf or 0 */
3777                 if (!is_zero_ether_addr(match.mask->dst)) {
3778                         if (is_broadcast_ether_addr(match.mask->dst)) {
3779                                 field_flags |= IAVF_CLOUD_FIELD_OMAC;
3780                         } else {
3781                                 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
3782                                         match.mask->dst);
3783                                 return -EINVAL;
3784                         }
3785                 }
3786
3787                 if (!is_zero_ether_addr(match.mask->src)) {
3788                         if (is_broadcast_ether_addr(match.mask->src)) {
3789                                 field_flags |= IAVF_CLOUD_FIELD_IMAC;
3790                         } else {
3791                                 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
3792                                         match.mask->src);
3793                                 return -EINVAL;
3794                         }
3795                 }
3796
3797                 if (!is_zero_ether_addr(match.key->dst))
3798                         if (is_valid_ether_addr(match.key->dst) ||
3799                             is_multicast_ether_addr(match.key->dst)) {
3800                                 /* set the mask if a valid dst_mac address */
3801                                 for (i = 0; i < ETH_ALEN; i++)
3802                                         vf->mask.tcp_spec.dst_mac[i] |= 0xff;
3803                                 ether_addr_copy(vf->data.tcp_spec.dst_mac,
3804                                                 match.key->dst);
3805                         }
3806
3807                 if (!is_zero_ether_addr(match.key->src))
3808                         if (is_valid_ether_addr(match.key->src) ||
3809                             is_multicast_ether_addr(match.key->src)) {
3810                                 /* set the mask if a valid dst_mac address */
3811                                 for (i = 0; i < ETH_ALEN; i++)
3812                                         vf->mask.tcp_spec.src_mac[i] |= 0xff;
3813                                 ether_addr_copy(vf->data.tcp_spec.src_mac,
3814                                                 match.key->src);
3815                 }
3816         }
3817
3818         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
3819                 struct flow_match_vlan match;
3820
3821                 flow_rule_match_vlan(rule, &match);
3822                 if (match.mask->vlan_id) {
3823                         if (match.mask->vlan_id == VLAN_VID_MASK) {
3824                                 field_flags |= IAVF_CLOUD_FIELD_IVLAN;
3825                         } else {
3826                                 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
3827                                         match.mask->vlan_id);
3828                                 return -EINVAL;
3829                         }
3830                 }
3831                 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
3832                 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
3833         }
3834
3835         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
3836                 struct flow_match_control match;
3837
3838                 flow_rule_match_control(rule, &match);
3839                 addr_type = match.key->addr_type;
3840         }
3841
3842         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
3843                 struct flow_match_ipv4_addrs match;
3844
3845                 flow_rule_match_ipv4_addrs(rule, &match);
3846                 if (match.mask->dst) {
3847                         if (match.mask->dst == cpu_to_be32(0xffffffff)) {
3848                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
3849                         } else {
3850                                 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
3851                                         be32_to_cpu(match.mask->dst));
3852                                 return -EINVAL;
3853                         }
3854                 }
3855
3856                 if (match.mask->src) {
3857                         if (match.mask->src == cpu_to_be32(0xffffffff)) {
3858                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
3859                         } else {
3860                                 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
3861                                         be32_to_cpu(match.mask->dst));
3862                                 return -EINVAL;
3863                         }
3864                 }
3865
3866                 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
3867                         dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
3868                         return -EINVAL;
3869                 }
3870                 if (match.key->dst) {
3871                         vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
3872                         vf->data.tcp_spec.dst_ip[0] = match.key->dst;
3873                 }
3874                 if (match.key->src) {
3875                         vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
3876                         vf->data.tcp_spec.src_ip[0] = match.key->src;
3877                 }
3878         }
3879
3880         if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
3881                 struct flow_match_ipv6_addrs match;
3882
3883                 flow_rule_match_ipv6_addrs(rule, &match);
3884
3885                 /* validate mask, make sure it is not IPV6_ADDR_ANY */
3886                 if (ipv6_addr_any(&match.mask->dst)) {
3887                         dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
3888                                 IPV6_ADDR_ANY);
3889                         return -EINVAL;
3890                 }
3891
3892                 /* src and dest IPv6 address should not be LOOPBACK
3893                  * (0:0:0:0:0:0:0:1) which can be represented as ::1
3894                  */
3895                 if (ipv6_addr_loopback(&match.key->dst) ||
3896                     ipv6_addr_loopback(&match.key->src)) {
3897                         dev_err(&adapter->pdev->dev,
3898                                 "ipv6 addr should not be loopback\n");
3899                         return -EINVAL;
3900                 }
3901                 if (!ipv6_addr_any(&match.mask->dst) ||
3902                     !ipv6_addr_any(&match.mask->src))
3903                         field_flags |= IAVF_CLOUD_FIELD_IIP;
3904
3905                 for (i = 0; i < 4; i++)
3906                         vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
3907                 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
3908                        sizeof(vf->data.tcp_spec.dst_ip));
3909                 for (i = 0; i < 4; i++)
3910                         vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
3911                 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
3912                        sizeof(vf->data.tcp_spec.src_ip));
3913         }
3914         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
3915                 struct flow_match_ports match;
3916
3917                 flow_rule_match_ports(rule, &match);
3918                 if (match.mask->src) {
3919                         if (match.mask->src == cpu_to_be16(0xffff)) {
3920                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
3921                         } else {
3922                                 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
3923                                         be16_to_cpu(match.mask->src));
3924                                 return -EINVAL;
3925                         }
3926                 }
3927
3928                 if (match.mask->dst) {
3929                         if (match.mask->dst == cpu_to_be16(0xffff)) {
3930                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
3931                         } else {
3932                                 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
3933                                         be16_to_cpu(match.mask->dst));
3934                                 return -EINVAL;
3935                         }
3936                 }
3937                 if (match.key->dst) {
3938                         vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
3939                         vf->data.tcp_spec.dst_port = match.key->dst;
3940                 }
3941
3942                 if (match.key->src) {
3943                         vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
3944                         vf->data.tcp_spec.src_port = match.key->src;
3945                 }
3946         }
3947         vf->field_flags = field_flags;
3948
3949         return 0;
3950 }
3951
3952 /**
3953  * iavf_handle_tclass - Forward to a traffic class on the device
3954  * @adapter: board private structure
3955  * @tc: traffic class index on the device
3956  * @filter: pointer to cloud filter structure
3957  */
3958 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
3959                               struct iavf_cloud_filter *filter)
3960 {
3961         if (tc == 0)
3962                 return 0;
3963         if (tc < adapter->num_tc) {
3964                 if (!filter->f.data.tcp_spec.dst_port) {
3965                         dev_err(&adapter->pdev->dev,
3966                                 "Specify destination port to redirect to traffic class other than TC0\n");
3967                         return -EINVAL;
3968                 }
3969         }
3970         /* redirect to a traffic class on the same device */
3971         filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
3972         filter->f.action_meta = tc;
3973         return 0;
3974 }
3975
3976 /**
3977  * iavf_find_cf - Find the cloud filter in the list
3978  * @adapter: Board private structure
3979  * @cookie: filter specific cookie
3980  *
3981  * Returns ptr to the filter object or NULL. Must be called while holding the
3982  * cloud_filter_list_lock.
3983  */
3984 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
3985                                               unsigned long *cookie)
3986 {
3987         struct iavf_cloud_filter *filter = NULL;
3988
3989         if (!cookie)
3990                 return NULL;
3991
3992         list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
3993                 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
3994                         return filter;
3995         }
3996         return NULL;
3997 }
3998
3999 /**
4000  * iavf_configure_clsflower - Add tc flower filters
4001  * @adapter: board private structure
4002  * @cls_flower: Pointer to struct flow_cls_offload
4003  */
4004 static int iavf_configure_clsflower(struct iavf_adapter *adapter,
4005                                     struct flow_cls_offload *cls_flower)
4006 {
4007         int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
4008         struct iavf_cloud_filter *filter = NULL;
4009         int err = -EINVAL, count = 50;
4010
4011         if (tc < 0) {
4012                 dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
4013                 return -EINVAL;
4014         }
4015
4016         filter = kzalloc(sizeof(*filter), GFP_KERNEL);
4017         if (!filter)
4018                 return -ENOMEM;
4019
4020         while (!mutex_trylock(&adapter->crit_lock)) {
4021                 if (--count == 0) {
4022                         kfree(filter);
4023                         return err;
4024                 }
4025                 udelay(1);
4026         }
4027
4028         filter->cookie = cls_flower->cookie;
4029
4030         /* bail out here if filter already exists */
4031         spin_lock_bh(&adapter->cloud_filter_list_lock);
4032         if (iavf_find_cf(adapter, &cls_flower->cookie)) {
4033                 dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n");
4034                 err = -EEXIST;
4035                 goto spin_unlock;
4036         }
4037         spin_unlock_bh(&adapter->cloud_filter_list_lock);
4038
4039         /* set the mask to all zeroes to begin with */
4040         memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
4041         /* start out with flow type and eth type IPv4 to begin with */
4042         filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
4043         err = iavf_parse_cls_flower(adapter, cls_flower, filter);
4044         if (err)
4045                 goto err;
4046
4047         err = iavf_handle_tclass(adapter, tc, filter);
4048         if (err)
4049                 goto err;
4050
4051         /* add filter to the list */
4052         spin_lock_bh(&adapter->cloud_filter_list_lock);
4053         list_add_tail(&filter->list, &adapter->cloud_filter_list);
4054         adapter->num_cloud_filters++;
4055         filter->add = true;
4056         adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
4057 spin_unlock:
4058         spin_unlock_bh(&adapter->cloud_filter_list_lock);
4059 err:
4060         if (err)
4061                 kfree(filter);
4062
4063         mutex_unlock(&adapter->crit_lock);
4064         return err;
4065 }
4066
4067 /**
4068  * iavf_delete_clsflower - Remove tc flower filters
4069  * @adapter: board private structure
4070  * @cls_flower: Pointer to struct flow_cls_offload
4071  */
4072 static int iavf_delete_clsflower(struct iavf_adapter *adapter,
4073                                  struct flow_cls_offload *cls_flower)
4074 {
4075         struct iavf_cloud_filter *filter = NULL;
4076         int err = 0;
4077
4078         spin_lock_bh(&adapter->cloud_filter_list_lock);
4079         filter = iavf_find_cf(adapter, &cls_flower->cookie);
4080         if (filter) {
4081                 filter->del = true;
4082                 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
4083         } else {
4084                 err = -EINVAL;
4085         }
4086         spin_unlock_bh(&adapter->cloud_filter_list_lock);
4087
4088         return err;
4089 }
4090
4091 /**
4092  * iavf_setup_tc_cls_flower - flower classifier offloads
4093  * @adapter: board private structure
4094  * @cls_flower: pointer to flow_cls_offload struct with flow info
4095  */
4096 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
4097                                     struct flow_cls_offload *cls_flower)
4098 {
4099         switch (cls_flower->command) {
4100         case FLOW_CLS_REPLACE:
4101                 return iavf_configure_clsflower(adapter, cls_flower);
4102         case FLOW_CLS_DESTROY:
4103                 return iavf_delete_clsflower(adapter, cls_flower);
4104         case FLOW_CLS_STATS:
4105                 return -EOPNOTSUPP;
4106         default:
4107                 return -EOPNOTSUPP;
4108         }
4109 }
4110
4111 /**
4112  * iavf_setup_tc_block_cb - block callback for tc
4113  * @type: type of offload
4114  * @type_data: offload data
4115  * @cb_priv:
4116  *
4117  * This function is the block callback for traffic classes
4118  **/
4119 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4120                                   void *cb_priv)
4121 {
4122         struct iavf_adapter *adapter = cb_priv;
4123
4124         if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
4125                 return -EOPNOTSUPP;
4126
4127         switch (type) {
4128         case TC_SETUP_CLSFLOWER:
4129                 return iavf_setup_tc_cls_flower(cb_priv, type_data);
4130         default:
4131                 return -EOPNOTSUPP;
4132         }
4133 }
4134
4135 static LIST_HEAD(iavf_block_cb_list);
4136
4137 /**
4138  * iavf_setup_tc - configure multiple traffic classes
4139  * @netdev: network interface device structure
4140  * @type: type of offload
4141  * @type_data: tc offload data
4142  *
4143  * This function is the callback to ndo_setup_tc in the
4144  * netdev_ops.
4145  *
4146  * Returns 0 on success
4147  **/
4148 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
4149                          void *type_data)
4150 {
4151         struct iavf_adapter *adapter = netdev_priv(netdev);
4152
4153         switch (type) {
4154         case TC_SETUP_QDISC_MQPRIO:
4155                 return __iavf_setup_tc(netdev, type_data);
4156         case TC_SETUP_BLOCK:
4157                 return flow_block_cb_setup_simple(type_data,
4158                                                   &iavf_block_cb_list,
4159                                                   iavf_setup_tc_block_cb,
4160                                                   adapter, adapter, true);
4161         default:
4162                 return -EOPNOTSUPP;
4163         }
4164 }
4165
4166 /**
4167  * iavf_open - Called when a network interface is made active
4168  * @netdev: network interface device structure
4169  *
4170  * Returns 0 on success, negative value on failure
4171  *
4172  * The open entry point is called when a network interface is made
4173  * active by the system (IFF_UP).  At this point all resources needed
4174  * for transmit and receive operations are allocated, the interrupt
4175  * handler is registered with the OS, the watchdog is started,
4176  * and the stack is notified that the interface is ready.
4177  **/
4178 static int iavf_open(struct net_device *netdev)
4179 {
4180         struct iavf_adapter *adapter = netdev_priv(netdev);
4181         int err;
4182
4183         if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
4184                 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
4185                 return -EIO;
4186         }
4187
4188         while (!mutex_trylock(&adapter->crit_lock)) {
4189                 /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
4190                  * is already taken and iavf_open is called from an upper
4191                  * device's notifier reacting on NETDEV_REGISTER event.
4192                  * We have to leave here to avoid dead lock.
4193                  */
4194                 if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
4195                         return -EBUSY;
4196
4197                 usleep_range(500, 1000);
4198         }
4199
4200         if (adapter->state != __IAVF_DOWN) {
4201                 err = -EBUSY;
4202                 goto err_unlock;
4203         }
4204
4205         if (adapter->state == __IAVF_RUNNING &&
4206             !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
4207                 dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
4208                 err = 0;
4209                 goto err_unlock;
4210         }
4211
4212         /* allocate transmit descriptors */
4213         err = iavf_setup_all_tx_resources(adapter);
4214         if (err)
4215                 goto err_setup_tx;
4216
4217         /* allocate receive descriptors */
4218         err = iavf_setup_all_rx_resources(adapter);
4219         if (err)
4220                 goto err_setup_rx;
4221
4222         /* clear any pending interrupts, may auto mask */
4223         err = iavf_request_traffic_irqs(adapter, netdev->name);
4224         if (err)
4225                 goto err_req_irq;
4226
4227         spin_lock_bh(&adapter->mac_vlan_list_lock);
4228
4229         iavf_add_filter(adapter, adapter->hw.mac.addr);
4230
4231         spin_unlock_bh(&adapter->mac_vlan_list_lock);
4232
4233         /* Restore VLAN filters that were removed with IFF_DOWN */
4234         iavf_restore_filters(adapter);
4235
4236         iavf_configure(adapter);
4237
4238         iavf_up_complete(adapter);
4239
4240         iavf_irq_enable(adapter, true);
4241
4242         mutex_unlock(&adapter->crit_lock);
4243
4244         return 0;
4245
4246 err_req_irq:
4247         iavf_down(adapter);
4248         iavf_free_traffic_irqs(adapter);
4249 err_setup_rx:
4250         iavf_free_all_rx_resources(adapter);
4251 err_setup_tx:
4252         iavf_free_all_tx_resources(adapter);
4253 err_unlock:
4254         mutex_unlock(&adapter->crit_lock);
4255
4256         return err;
4257 }
4258
4259 /**
4260  * iavf_close - Disables a network interface
4261  * @netdev: network interface device structure
4262  *
4263  * Returns 0, this is not allowed to fail
4264  *
4265  * The close entry point is called when an interface is de-activated
4266  * by the OS.  The hardware is still under the drivers control, but
4267  * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
4268  * are freed, along with all transmit and receive resources.
4269  **/
4270 static int iavf_close(struct net_device *netdev)
4271 {
4272         struct iavf_adapter *adapter = netdev_priv(netdev);
4273         u64 aq_to_restore;
4274         int status;
4275
4276         mutex_lock(&adapter->crit_lock);
4277
4278         if (adapter->state <= __IAVF_DOWN_PENDING) {
4279                 mutex_unlock(&adapter->crit_lock);
4280                 return 0;
4281         }
4282
4283         set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
4284         if (CLIENT_ENABLED(adapter))
4285                 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
4286         /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
4287          * IAVF_FLAG_AQ_DISABLE_QUEUES because in such case there is rtnl
4288          * deadlock with adminq_task() until iavf_close timeouts. We must send
4289          * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to make
4290          * disable queues possible for vf. Give only necessary flags to
4291          * iavf_down and save other to set them right before iavf_close()
4292          * returns, when IAVF_FLAG_AQ_DISABLE_QUEUES will be already sent and
4293          * iavf will be in DOWN state.
4294          */
4295         aq_to_restore = adapter->aq_required;
4296         adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;
4297
4298         /* Remove flags which we do not want to send after close or we want to
4299          * send before disable queues.
4300          */
4301         aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG              |
4302                            IAVF_FLAG_AQ_ENABLE_QUEUES           |
4303                            IAVF_FLAG_AQ_CONFIGURE_QUEUES        |
4304                            IAVF_FLAG_AQ_ADD_VLAN_FILTER         |
4305                            IAVF_FLAG_AQ_ADD_MAC_FILTER          |
4306                            IAVF_FLAG_AQ_ADD_CLOUD_FILTER        |
4307                            IAVF_FLAG_AQ_ADD_FDIR_FILTER         |
4308                            IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
4309
4310         iavf_down(adapter);
4311         iavf_change_state(adapter, __IAVF_DOWN_PENDING);
4312         iavf_free_traffic_irqs(adapter);
4313
4314         mutex_unlock(&adapter->crit_lock);
4315
4316         /* We explicitly don't free resources here because the hardware is
4317          * still active and can DMA into memory. Resources are cleared in
4318          * iavf_virtchnl_completion() after we get confirmation from the PF
4319          * driver that the rings have been stopped.
4320          *
4321          * Also, we wait for state to transition to __IAVF_DOWN before
4322          * returning. State change occurs in iavf_virtchnl_completion() after
4323          * VF resources are released (which occurs after PF driver processes and
4324          * responds to admin queue commands).
4325          */
4326
4327         status = wait_event_timeout(adapter->down_waitqueue,
4328                                     adapter->state == __IAVF_DOWN,
4329                                     msecs_to_jiffies(500));
4330         if (!status)
4331                 netdev_warn(netdev, "Device resources not yet released\n");
4332
4333         mutex_lock(&adapter->crit_lock);
4334         adapter->aq_required |= aq_to_restore;
4335         mutex_unlock(&adapter->crit_lock);
4336         return 0;
4337 }
4338
4339 /**
4340  * iavf_change_mtu - Change the Maximum Transfer Unit
4341  * @netdev: network interface device structure
4342  * @new_mtu: new value for maximum frame size
4343  *
4344  * Returns 0 on success, negative on failure
4345  **/
4346 static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
4347 {
4348         struct iavf_adapter *adapter = netdev_priv(netdev);
4349
4350         netdev_dbg(netdev, "changing MTU from %d to %d\n",
4351                    netdev->mtu, new_mtu);
4352         netdev->mtu = new_mtu;
4353         if (CLIENT_ENABLED(adapter)) {
4354                 iavf_notify_client_l2_params(&adapter->vsi);
4355                 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
4356         }
4357
4358         if (netif_running(netdev)) {
4359                 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
4360                 queue_work(iavf_wq, &adapter->reset_task);
4361         }
4362
4363         return 0;
4364 }
4365
4366 #define NETIF_VLAN_OFFLOAD_FEATURES     (NETIF_F_HW_VLAN_CTAG_RX | \
4367                                          NETIF_F_HW_VLAN_CTAG_TX | \
4368                                          NETIF_F_HW_VLAN_STAG_RX | \
4369                                          NETIF_F_HW_VLAN_STAG_TX)
4370
4371 /**
4372  * iavf_set_features - set the netdev feature flags
4373  * @netdev: ptr to the netdev being adjusted
4374  * @features: the feature set that the stack is suggesting
4375  * Note: expects to be called while under rtnl_lock()
4376  **/
4377 static int iavf_set_features(struct net_device *netdev,
4378                              netdev_features_t features)
4379 {
4380         struct iavf_adapter *adapter = netdev_priv(netdev);
4381
4382         /* trigger update on any VLAN feature change */
4383         if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
4384             (features & NETIF_VLAN_OFFLOAD_FEATURES))
4385                 iavf_set_vlan_offload_features(adapter, netdev->features,
4386                                                features);
4387
4388         return 0;
4389 }
4390
4391 /**
4392  * iavf_features_check - Validate encapsulated packet conforms to limits
4393  * @skb: skb buff
4394  * @dev: This physical port's netdev
4395  * @features: Offload features that the stack believes apply
4396  **/
4397 static netdev_features_t iavf_features_check(struct sk_buff *skb,
4398                                              struct net_device *dev,
4399                                              netdev_features_t features)
4400 {
4401         size_t len;
4402
4403         /* No point in doing any of this if neither checksum nor GSO are
4404          * being requested for this frame.  We can rule out both by just
4405          * checking for CHECKSUM_PARTIAL
4406          */
4407         if (skb->ip_summed != CHECKSUM_PARTIAL)
4408                 return features;
4409
4410         /* We cannot support GSO if the MSS is going to be less than
4411          * 64 bytes.  If it is then we need to drop support for GSO.
4412          */
4413         if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
4414                 features &= ~NETIF_F_GSO_MASK;
4415
4416         /* MACLEN can support at most 63 words */
4417         len = skb_network_header(skb) - skb->data;
4418         if (len & ~(63 * 2))
4419                 goto out_err;
4420
4421         /* IPLEN and EIPLEN can support at most 127 dwords */
4422         len = skb_transport_header(skb) - skb_network_header(skb);
4423         if (len & ~(127 * 4))
4424                 goto out_err;
4425
4426         if (skb->encapsulation) {
4427                 /* L4TUNLEN can support 127 words */
4428                 len = skb_inner_network_header(skb) - skb_transport_header(skb);
4429                 if (len & ~(127 * 2))
4430                         goto out_err;
4431
4432                 /* IPLEN can support at most 127 dwords */
4433                 len = skb_inner_transport_header(skb) -
4434                       skb_inner_network_header(skb);
4435                 if (len & ~(127 * 4))
4436                         goto out_err;
4437         }
4438
4439         /* No need to validate L4LEN as TCP is the only protocol with a
4440          * flexible value and we support all possible values supported
4441          * by TCP, which is at most 15 dwords
4442          */
4443
4444         return features;
4445 out_err:
4446         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4447 }
4448
4449 /**
4450  * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off
4451  * @adapter: board private structure
4452  *
4453  * Depending on whether VIRTHCNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4454  * were negotiated determine the VLAN features that can be toggled on and off.
4455  **/
4456 static netdev_features_t
4457 iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
4458 {
4459         netdev_features_t hw_features = 0;
4460
4461         if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4462                 return hw_features;
4463
4464         /* Enable VLAN features if supported */
4465         if (VLAN_ALLOWED(adapter)) {
4466                 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
4467                                 NETIF_F_HW_VLAN_CTAG_RX);
4468         } else if (VLAN_V2_ALLOWED(adapter)) {
4469                 struct virtchnl_vlan_caps *vlan_v2_caps =
4470                         &adapter->vlan_v2_caps;
4471                 struct virtchnl_vlan_supported_caps *stripping_support =
4472                         &vlan_v2_caps->offloads.stripping_support;
4473                 struct virtchnl_vlan_supported_caps *insertion_support =
4474                         &vlan_v2_caps->offloads.insertion_support;
4475
4476                 if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4477                     stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4478                         if (stripping_support->outer &
4479                             VIRTCHNL_VLAN_ETHERTYPE_8100)
4480                                 hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4481                         if (stripping_support->outer &
4482                             VIRTCHNL_VLAN_ETHERTYPE_88A8)
4483                                 hw_features |= NETIF_F_HW_VLAN_STAG_RX;
4484                 } else if (stripping_support->inner !=
4485                            VIRTCHNL_VLAN_UNSUPPORTED &&
4486                            stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4487                         if (stripping_support->inner &
4488                             VIRTCHNL_VLAN_ETHERTYPE_8100)
4489                                 hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4490                 }
4491
4492                 if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4493                     insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4494                         if (insertion_support->outer &
4495                             VIRTCHNL_VLAN_ETHERTYPE_8100)
4496                                 hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4497                         if (insertion_support->outer &
4498                             VIRTCHNL_VLAN_ETHERTYPE_88A8)
4499                                 hw_features |= NETIF_F_HW_VLAN_STAG_TX;
4500                 } else if (insertion_support->inner &&
4501                            insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4502                         if (insertion_support->inner &
4503                             VIRTCHNL_VLAN_ETHERTYPE_8100)
4504                                 hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4505                 }
4506         }
4507
4508         return hw_features;
4509 }
4510
4511 /**
4512  * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN fetures
4513  * @adapter: board private structure
4514  *
4515  * Depending on whether VIRTHCNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4516  * were negotiated determine the VLAN features that are enabled by default.
4517  **/
4518 static netdev_features_t
4519 iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
4520 {
4521         netdev_features_t features = 0;
4522
4523         if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4524                 return features;
4525
4526         if (VLAN_ALLOWED(adapter)) {
4527                 features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4528                         NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
4529         } else if (VLAN_V2_ALLOWED(adapter)) {
4530                 struct virtchnl_vlan_caps *vlan_v2_caps =
4531                         &adapter->vlan_v2_caps;
4532                 struct virtchnl_vlan_supported_caps *filtering_support =
4533                         &vlan_v2_caps->filtering.filtering_support;
4534                 struct virtchnl_vlan_supported_caps *stripping_support =
4535                         &vlan_v2_caps->offloads.stripping_support;
4536                 struct virtchnl_vlan_supported_caps *insertion_support =
4537                         &vlan_v2_caps->offloads.insertion_support;
4538                 u32 ethertype_init;
4539
4540                 /* give priority to outer stripping and don't support both outer
4541                  * and inner stripping
4542                  */
4543                 ethertype_init = vlan_v2_caps->offloads.ethertype_init;
4544                 if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4545                         if (stripping_support->outer &
4546                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4547                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4548                                 features |= NETIF_F_HW_VLAN_CTAG_RX;
4549                         else if (stripping_support->outer &
4550                                  VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4551                                  ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4552                                 features |= NETIF_F_HW_VLAN_STAG_RX;
4553                 } else if (stripping_support->inner !=
4554                            VIRTCHNL_VLAN_UNSUPPORTED) {
4555                         if (stripping_support->inner &
4556                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4557                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4558                                 features |= NETIF_F_HW_VLAN_CTAG_RX;
4559                 }
4560
4561                 /* give priority to outer insertion and don't support both outer
4562                  * and inner insertion
4563                  */
4564                 if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4565                         if (insertion_support->outer &
4566                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4567                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4568                                 features |= NETIF_F_HW_VLAN_CTAG_TX;
4569                         else if (insertion_support->outer &
4570                                  VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4571                                  ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4572                                 features |= NETIF_F_HW_VLAN_STAG_TX;
4573                 } else if (insertion_support->inner !=
4574                            VIRTCHNL_VLAN_UNSUPPORTED) {
4575                         if (insertion_support->inner &
4576                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4577                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4578                                 features |= NETIF_F_HW_VLAN_CTAG_TX;
4579                 }
4580
4581                 /* give priority to outer filtering and don't bother if both
4582                  * outer and inner filtering are enabled
4583                  */
4584                 ethertype_init = vlan_v2_caps->filtering.ethertype_init;
4585                 if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4586                         if (filtering_support->outer &
4587                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4588                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4589                                 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4590                         if (filtering_support->outer &
4591                             VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4592                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4593                                 features |= NETIF_F_HW_VLAN_STAG_FILTER;
4594                 } else if (filtering_support->inner !=
4595                            VIRTCHNL_VLAN_UNSUPPORTED) {
4596                         if (filtering_support->inner &
4597                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4598                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4599                                 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4600                         if (filtering_support->inner &
4601                             VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4602                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4603                                 features |= NETIF_F_HW_VLAN_STAG_FILTER;
4604                 }
4605         }
4606
4607         return features;
4608 }
4609
4610 #define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
4611         (!(((requested) & (feature_bit)) && \
4612            !((allowed) & (feature_bit))))
4613
4614 /**
4615  * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
4616  * @adapter: board private structure
4617  * @requested_features: stack requested NETDEV features
4618  **/
4619 static netdev_features_t
4620 iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
4621                               netdev_features_t requested_features)
4622 {
4623         netdev_features_t allowed_features;
4624
4625         allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
4626                 iavf_get_netdev_vlan_features(adapter);
4627
4628         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4629                                               allowed_features,
4630                                               NETIF_F_HW_VLAN_CTAG_TX))
4631                 requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
4632
4633         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4634                                               allowed_features,
4635                                               NETIF_F_HW_VLAN_CTAG_RX))
4636                 requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
4637
4638         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4639                                               allowed_features,
4640                                               NETIF_F_HW_VLAN_STAG_TX))
4641                 requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
4642         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4643                                               allowed_features,
4644                                               NETIF_F_HW_VLAN_STAG_RX))
4645                 requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;
4646
4647         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4648                                               allowed_features,
4649                                               NETIF_F_HW_VLAN_CTAG_FILTER))
4650                 requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4651
4652         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4653                                               allowed_features,
4654                                               NETIF_F_HW_VLAN_STAG_FILTER))
4655                 requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
4656
4657         if ((requested_features &
4658              (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
4659             (requested_features &
4660              (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
4661             adapter->vlan_v2_caps.offloads.ethertype_match ==
4662             VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
4663                 netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
4664                 requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
4665                                         NETIF_F_HW_VLAN_STAG_TX);
4666         }
4667
4668         return requested_features;
4669 }
4670
4671 /**
4672  * iavf_fix_features - fix up the netdev feature bits
4673  * @netdev: our net device
4674  * @features: desired feature bits
4675  *
4676  * Returns fixed-up features bits
4677  **/
4678 static netdev_features_t iavf_fix_features(struct net_device *netdev,
4679                                            netdev_features_t features)
4680 {
4681         struct iavf_adapter *adapter = netdev_priv(netdev);
4682
4683         return iavf_fix_netdev_vlan_features(adapter, features);
4684 }
4685
4686 static const struct net_device_ops iavf_netdev_ops = {
4687         .ndo_open               = iavf_open,
4688         .ndo_stop               = iavf_close,
4689         .ndo_start_xmit         = iavf_xmit_frame,
4690         .ndo_set_rx_mode        = iavf_set_rx_mode,
4691         .ndo_validate_addr      = eth_validate_addr,
4692         .ndo_set_mac_address    = iavf_set_mac,
4693         .ndo_change_mtu         = iavf_change_mtu,
4694         .ndo_tx_timeout         = iavf_tx_timeout,
4695         .ndo_vlan_rx_add_vid    = iavf_vlan_rx_add_vid,
4696         .ndo_vlan_rx_kill_vid   = iavf_vlan_rx_kill_vid,
4697         .ndo_features_check     = iavf_features_check,
4698         .ndo_fix_features       = iavf_fix_features,
4699         .ndo_set_features       = iavf_set_features,
4700         .ndo_setup_tc           = iavf_setup_tc,
4701 };
4702
4703 /**
4704  * iavf_check_reset_complete - check that VF reset is complete
4705  * @hw: pointer to hw struct
4706  *
4707  * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
4708  **/
4709 static int iavf_check_reset_complete(struct iavf_hw *hw)
4710 {
4711         u32 rstat;
4712         int i;
4713
4714         for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
4715                 rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
4716                              IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
4717                 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
4718                     (rstat == VIRTCHNL_VFR_COMPLETED))
4719                         return 0;
4720                 usleep_range(10, 20);
4721         }
4722         return -EBUSY;
4723 }
4724
4725 /**
4726  * iavf_process_config - Process the config information we got from the PF
4727  * @adapter: board private structure
4728  *
4729  * Verify that we have a valid config struct, and set up our netdev features
4730  * and our VSI struct.
4731  **/
4732 int iavf_process_config(struct iavf_adapter *adapter)
4733 {
4734         struct virtchnl_vf_resource *vfres = adapter->vf_res;
4735         netdev_features_t hw_vlan_features, vlan_features;
4736         struct net_device *netdev = adapter->netdev;
4737         netdev_features_t hw_enc_features;
4738         netdev_features_t hw_features;
4739
4740         hw_enc_features = NETIF_F_SG                    |
4741                           NETIF_F_IP_CSUM               |
4742                           NETIF_F_IPV6_CSUM             |
4743                           NETIF_F_HIGHDMA               |
4744                           NETIF_F_SOFT_FEATURES |
4745                           NETIF_F_TSO                   |
4746                           NETIF_F_TSO_ECN               |
4747                           NETIF_F_TSO6                  |
4748                           NETIF_F_SCTP_CRC              |
4749                           NETIF_F_RXHASH                |
4750                           NETIF_F_RXCSUM                |
4751                           0;
4752
4753         /* advertise to stack only if offloads for encapsulated packets is
4754          * supported
4755          */
4756         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
4757                 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL       |
4758                                    NETIF_F_GSO_GRE              |
4759                                    NETIF_F_GSO_GRE_CSUM         |
4760                                    NETIF_F_GSO_IPXIP4           |
4761                                    NETIF_F_GSO_IPXIP6           |
4762                                    NETIF_F_GSO_UDP_TUNNEL_CSUM  |
4763                                    NETIF_F_GSO_PARTIAL          |
4764                                    0;
4765
4766                 if (!(vfres->vf_cap_flags &
4767                       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
4768                         netdev->gso_partial_features |=
4769                                 NETIF_F_GSO_UDP_TUNNEL_CSUM;
4770
4771                 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
4772                 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
4773                 netdev->hw_enc_features |= hw_enc_features;
4774         }
4775         /* record features VLANs can make use of */
4776         netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
4777
4778         /* Write features and hw_features separately to avoid polluting
4779          * with, or dropping, features that are set when we registered.
4780          */
4781         hw_features = hw_enc_features;
4782
4783         /* get HW VLAN features that can be toggled */
4784         hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
4785
4786         /* Enable cloud filter if ADQ is supported */
4787         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
4788                 hw_features |= NETIF_F_HW_TC;
4789         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
4790                 hw_features |= NETIF_F_GSO_UDP_L4;
4791
4792         netdev->hw_features |= hw_features | hw_vlan_features;
4793         vlan_features = iavf_get_netdev_vlan_features(adapter);
4794
4795         netdev->features |= hw_features | vlan_features;
4796
4797         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
4798                 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4799
4800         netdev->priv_flags |= IFF_UNICAST_FLT;
4801
4802         /* Do not turn on offloads when they are requested to be turned off.
4803          * TSO needs minimum 576 bytes to work correctly.
4804          */
4805         if (netdev->wanted_features) {
4806                 if (!(netdev->wanted_features & NETIF_F_TSO) ||
4807                     netdev->mtu < 576)
4808                         netdev->features &= ~NETIF_F_TSO;
4809                 if (!(netdev->wanted_features & NETIF_F_TSO6) ||
4810                     netdev->mtu < 576)
4811                         netdev->features &= ~NETIF_F_TSO6;
4812                 if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
4813                         netdev->features &= ~NETIF_F_TSO_ECN;
4814                 if (!(netdev->wanted_features & NETIF_F_GRO))
4815                         netdev->features &= ~NETIF_F_GRO;
4816                 if (!(netdev->wanted_features & NETIF_F_GSO))
4817                         netdev->features &= ~NETIF_F_GSO;
4818         }
4819
4820         return 0;
4821 }
4822
4823 /**
4824  * iavf_shutdown - Shutdown the device in preparation for a reboot
4825  * @pdev: pci device structure
4826  **/
4827 static void iavf_shutdown(struct pci_dev *pdev)
4828 {
4829         struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
4830         struct net_device *netdev = adapter->netdev;
4831
4832         netif_device_detach(netdev);
4833
4834         if (netif_running(netdev))
4835                 iavf_close(netdev);
4836
4837         if (iavf_lock_timeout(&adapter->crit_lock, 5000))
4838                 dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
4839         /* Prevent the watchdog from running. */
4840         iavf_change_state(adapter, __IAVF_REMOVE);
4841         adapter->aq_required = 0;
4842         mutex_unlock(&adapter->crit_lock);
4843
4844 #ifdef CONFIG_PM
4845         pci_save_state(pdev);
4846
4847 #endif
4848         pci_disable_device(pdev);
4849 }
4850
4851 /**
4852  * iavf_probe - Device Initialization Routine
4853  * @pdev: PCI device information struct
4854  * @ent: entry in iavf_pci_tbl
4855  *
4856  * Returns 0 on success, negative on failure
4857  *
4858  * iavf_probe initializes an adapter identified by a pci_dev structure.
4859  * The OS initialization, configuring of the adapter private structure,
4860  * and a hardware reset occur.
4861  **/
4862 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4863 {
4864         struct net_device *netdev;
4865         struct iavf_adapter *adapter = NULL;
4866         struct iavf_hw *hw = NULL;
4867         int err;
4868
4869         err = pci_enable_device(pdev);
4870         if (err)
4871                 return err;
4872
4873         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4874         if (err) {
4875                 dev_err(&pdev->dev,
4876                         "DMA configuration failed: 0x%x\n", err);
4877                 goto err_dma;
4878         }
4879
4880         err = pci_request_regions(pdev, iavf_driver_name);
4881         if (err) {
4882                 dev_err(&pdev->dev,
4883                         "pci_request_regions failed 0x%x\n", err);
4884                 goto err_pci_reg;
4885         }
4886
4887         pci_enable_pcie_error_reporting(pdev);
4888
4889         pci_set_master(pdev);
4890
4891         netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
4892                                    IAVF_MAX_REQ_QUEUES);
4893         if (!netdev) {
4894                 err = -ENOMEM;
4895                 goto err_alloc_etherdev;
4896         }
4897
4898         SET_NETDEV_DEV(netdev, &pdev->dev);
4899
4900         pci_set_drvdata(pdev, netdev);
4901         adapter = netdev_priv(netdev);
4902
4903         adapter->netdev = netdev;
4904         adapter->pdev = pdev;
4905
4906         hw = &adapter->hw;
4907         hw->back = adapter;
4908
4909         adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
4910         iavf_change_state(adapter, __IAVF_STARTUP);
4911
4912         /* Call save state here because it relies on the adapter struct. */
4913         pci_save_state(pdev);
4914
4915         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4916                               pci_resource_len(pdev, 0));
4917         if (!hw->hw_addr) {
4918                 err = -EIO;
4919                 goto err_ioremap;
4920         }
4921         hw->vendor_id = pdev->vendor;
4922         hw->device_id = pdev->device;
4923         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4924         hw->subsystem_vendor_id = pdev->subsystem_vendor;
4925         hw->subsystem_device_id = pdev->subsystem_device;
4926         hw->bus.device = PCI_SLOT(pdev->devfn);
4927         hw->bus.func = PCI_FUNC(pdev->devfn);
4928         hw->bus.bus_id = pdev->bus->number;
4929
4930         /* set up the locks for the AQ, do this only once in probe
4931          * and destroy them only once in remove
4932          */
4933         mutex_init(&adapter->crit_lock);
4934         mutex_init(&adapter->client_lock);
4935         mutex_init(&hw->aq.asq_mutex);
4936         mutex_init(&hw->aq.arq_mutex);
4937
4938         spin_lock_init(&adapter->mac_vlan_list_lock);
4939         spin_lock_init(&adapter->cloud_filter_list_lock);
4940         spin_lock_init(&adapter->fdir_fltr_lock);
4941         spin_lock_init(&adapter->adv_rss_lock);
4942
4943         INIT_LIST_HEAD(&adapter->mac_filter_list);
4944         INIT_LIST_HEAD(&adapter->vlan_filter_list);
4945         INIT_LIST_HEAD(&adapter->cloud_filter_list);
4946         INIT_LIST_HEAD(&adapter->fdir_list_head);
4947         INIT_LIST_HEAD(&adapter->adv_rss_list_head);
4948
4949         INIT_WORK(&adapter->reset_task, iavf_reset_task);
4950         INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
4951         INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
4952         INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
4953         queue_delayed_work(iavf_wq, &adapter->watchdog_task,
4954                            msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
4955
4956         /* Setup the wait queue for indicating transition to down status */
4957         init_waitqueue_head(&adapter->down_waitqueue);
4958
4959         /* Setup the wait queue for indicating virtchannel events */
4960         init_waitqueue_head(&adapter->vc_waitqueue);
4961
4962         return 0;
4963
4964 err_ioremap:
4965         free_netdev(netdev);
4966 err_alloc_etherdev:
4967         pci_disable_pcie_error_reporting(pdev);
4968         pci_release_regions(pdev);
4969 err_pci_reg:
4970 err_dma:
4971         pci_disable_device(pdev);
4972         return err;
4973 }
4974
4975 /**
4976  * iavf_suspend - Power management suspend routine
4977  * @dev_d: device info pointer
4978  *
4979  * Called when the system (VM) is entering sleep/suspend.
4980  **/
4981 static int __maybe_unused iavf_suspend(struct device *dev_d)
4982 {
4983         struct net_device *netdev = dev_get_drvdata(dev_d);
4984         struct iavf_adapter *adapter = netdev_priv(netdev);
4985
4986         netif_device_detach(netdev);
4987
4988         while (!mutex_trylock(&adapter->crit_lock))
4989                 usleep_range(500, 1000);
4990
4991         if (netif_running(netdev)) {
4992                 rtnl_lock();
4993                 iavf_down(adapter);
4994                 rtnl_unlock();
4995         }
4996         iavf_free_misc_irq(adapter);
4997         iavf_reset_interrupt_capability(adapter);
4998
4999         mutex_unlock(&adapter->crit_lock);
5000
5001         return 0;
5002 }
5003
5004 /**
5005  * iavf_resume - Power management resume routine
5006  * @dev_d: device info pointer
5007  *
5008  * Called when the system (VM) is resumed from sleep/suspend.
5009  **/
5010 static int __maybe_unused iavf_resume(struct device *dev_d)
5011 {
5012         struct pci_dev *pdev = to_pci_dev(dev_d);
5013         struct iavf_adapter *adapter;
5014         u32 err;
5015
5016         adapter = iavf_pdev_to_adapter(pdev);
5017
5018         pci_set_master(pdev);
5019
5020         rtnl_lock();
5021         err = iavf_set_interrupt_capability(adapter);
5022         if (err) {
5023                 rtnl_unlock();
5024                 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
5025                 return err;
5026         }
5027         err = iavf_request_misc_irq(adapter);
5028         rtnl_unlock();
5029         if (err) {
5030                 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
5031                 return err;
5032         }
5033
5034         queue_work(iavf_wq, &adapter->reset_task);
5035
5036         netif_device_attach(adapter->netdev);
5037
5038         return err;
5039 }
5040
5041 /**
5042  * iavf_remove - Device Removal Routine
5043  * @pdev: PCI device information struct
5044  *
5045  * iavf_remove is called by the PCI subsystem to alert the driver
5046  * that it should release a PCI device.  The could be caused by a
5047  * Hot-Plug event, or because the driver is going to be removed from
5048  * memory.
5049  **/
5050 static void iavf_remove(struct pci_dev *pdev)
5051 {
5052         struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
5053         struct net_device *netdev = adapter->netdev;
5054         struct iavf_fdir_fltr *fdir, *fdirtmp;
5055         struct iavf_vlan_filter *vlf, *vlftmp;
5056         struct iavf_adv_rss *rss, *rsstmp;
5057         struct iavf_mac_filter *f, *ftmp;
5058         struct iavf_cloud_filter *cf, *cftmp;
5059         struct iavf_hw *hw = &adapter->hw;
5060         int err;
5061
5062         /* When reboot/shutdown is in progress no need to do anything
5063          * as the adapter is already REMOVE state that was set during
5064          * iavf_shutdown() callback.
5065          */
5066         if (adapter->state == __IAVF_REMOVE)
5067                 return;
5068
5069         set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
5070         /* Wait until port initialization is complete.
5071          * There are flows where register/unregister netdev may race.
5072          */
5073         while (1) {
5074                 mutex_lock(&adapter->crit_lock);
5075                 if (adapter->state == __IAVF_RUNNING ||
5076                     adapter->state == __IAVF_DOWN ||
5077                     adapter->state == __IAVF_INIT_FAILED) {
5078                         mutex_unlock(&adapter->crit_lock);
5079                         break;
5080                 }
5081
5082                 mutex_unlock(&adapter->crit_lock);
5083                 usleep_range(500, 1000);
5084         }
5085         cancel_delayed_work_sync(&adapter->watchdog_task);
5086
5087         if (adapter->netdev_registered) {
5088                 rtnl_lock();
5089                 unregister_netdevice(netdev);
5090                 adapter->netdev_registered = false;
5091                 rtnl_unlock();
5092         }
5093         if (CLIENT_ALLOWED(adapter)) {
5094                 err = iavf_lan_del_device(adapter);
5095                 if (err)
5096                         dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
5097                                  err);
5098         }
5099
5100         mutex_lock(&adapter->crit_lock);
5101         dev_info(&adapter->pdev->dev, "Remove device\n");
5102         iavf_change_state(adapter, __IAVF_REMOVE);
5103
5104         iavf_request_reset(adapter);
5105         msleep(50);
5106         /* If the FW isn't responding, kick it once, but only once. */
5107         if (!iavf_asq_done(hw)) {
5108                 iavf_request_reset(adapter);
5109                 msleep(50);
5110         }
5111
5112         iavf_misc_irq_disable(adapter);
5113         /* Shut down all the garbage mashers on the detention level */
5114         cancel_work_sync(&adapter->reset_task);
5115         cancel_delayed_work_sync(&adapter->watchdog_task);
5116         cancel_work_sync(&adapter->adminq_task);
5117         cancel_delayed_work_sync(&adapter->client_task);
5118
5119         adapter->aq_required = 0;
5120         adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
5121
5122         iavf_free_all_tx_resources(adapter);
5123         iavf_free_all_rx_resources(adapter);
5124         iavf_free_misc_irq(adapter);
5125
5126         iavf_reset_interrupt_capability(adapter);
5127         iavf_free_q_vectors(adapter);
5128
5129         iavf_free_rss(adapter);
5130
5131         if (hw->aq.asq.count)
5132                 iavf_shutdown_adminq(hw);
5133
5134         /* destroy the locks only once, here */
5135         mutex_destroy(&hw->aq.arq_mutex);
5136         mutex_destroy(&hw->aq.asq_mutex);
5137         mutex_destroy(&adapter->client_lock);
5138         mutex_unlock(&adapter->crit_lock);
5139         mutex_destroy(&adapter->crit_lock);
5140
5141         iounmap(hw->hw_addr);
5142         pci_release_regions(pdev);
5143         iavf_free_queues(adapter);
5144         kfree(adapter->vf_res);
5145         spin_lock_bh(&adapter->mac_vlan_list_lock);
5146         /* If we got removed before an up/down sequence, we've got a filter
5147          * hanging out there that we need to get rid of.
5148          */
5149         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
5150                 list_del(&f->list);
5151                 kfree(f);
5152         }
5153         list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
5154                                  list) {
5155                 list_del(&vlf->list);
5156                 kfree(vlf);
5157         }
5158
5159         spin_unlock_bh(&adapter->mac_vlan_list_lock);
5160
5161         spin_lock_bh(&adapter->cloud_filter_list_lock);
5162         list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
5163                 list_del(&cf->list);
5164                 kfree(cf);
5165         }
5166         spin_unlock_bh(&adapter->cloud_filter_list_lock);
5167
5168         spin_lock_bh(&adapter->fdir_fltr_lock);
5169         list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
5170                 list_del(&fdir->list);
5171                 kfree(fdir);
5172         }
5173         spin_unlock_bh(&adapter->fdir_fltr_lock);
5174
5175         spin_lock_bh(&adapter->adv_rss_lock);
5176         list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
5177                                  list) {
5178                 list_del(&rss->list);
5179                 kfree(rss);
5180         }
5181         spin_unlock_bh(&adapter->adv_rss_lock);
5182
5183         free_netdev(netdev);
5184
5185         pci_disable_pcie_error_reporting(pdev);
5186
5187         pci_disable_device(pdev);
5188 }
5189
5190 static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
5191
5192 static struct pci_driver iavf_driver = {
5193         .name      = iavf_driver_name,
5194         .id_table  = iavf_pci_tbl,
5195         .probe     = iavf_probe,
5196         .remove    = iavf_remove,
5197         .driver.pm = &iavf_pm_ops,
5198         .shutdown  = iavf_shutdown,
5199 };
5200
5201 /**
5202  * iavf_init_module - Driver Registration Routine
5203  *
5204  * iavf_init_module is the first routine called when the driver is
5205  * loaded. All it does is register with the PCI subsystem.
5206  **/
5207 static int __init iavf_init_module(void)
5208 {
5209         pr_info("iavf: %s\n", iavf_driver_string);
5210
5211         pr_info("%s\n", iavf_copyright);
5212
5213         iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
5214                                   iavf_driver_name);
5215         if (!iavf_wq) {
5216                 pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
5217                 return -ENOMEM;
5218         }
5219         return pci_register_driver(&iavf_driver);
5220 }
5221
5222 module_init(iavf_init_module);
5223
5224 /**
5225  * iavf_exit_module - Driver Exit Cleanup Routine
5226  *
5227  * iavf_exit_module is called just before the driver is removed
5228  * from memory.
5229  **/
5230 static void __exit iavf_exit_module(void)
5231 {
5232         pci_unregister_driver(&iavf_driver);
5233         destroy_workqueue(iavf_wq);
5234 }
5235
5236 module_exit(iavf_exit_module);
5237
5238 /* iavf_main.c */