drivers/net/ethernet/intel/iavf/iavf_main.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static int iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
        "Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
        "Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;

/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
                                         struct iavf_dma_mem *mem,
                                         u64 size, u32 alignment)
{
        struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

        if (!mem)
                return IAVF_ERR_PARAM;

        mem->size = ALIGN(size, alignment);
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
                                     (dma_addr_t *)&mem->pa, GFP_KERNEL);
        if (mem->va)
                return 0;
        else
                return IAVF_ERR_NO_MEMORY;
}
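/* A note on the helper above: ALIGN() only rounds the requested *size* up to
 * the next multiple of @alignment (e.g. ALIGN(1000, 4096) == 4096); the
 * buffer itself comes back at least page-aligned from dma_alloc_coherent()
 * per the DMA-API guarantees.
 */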

/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
                                     struct iavf_dma_mem *mem)
{
        struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

        if (!mem || !mem->va)
                return IAVF_ERR_PARAM;
        dma_free_coherent(&adapter->pdev->dev, mem->size,
                          mem->va, (dma_addr_t)mem->pa);
        return 0;
}

/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
                                          struct iavf_virt_mem *mem, u32 size)
{
        if (!mem)
                return IAVF_ERR_PARAM;

        mem->size = size;
        mem->va = kzalloc(size, GFP_KERNEL);

        if (mem->va)
                return 0;
        else
                return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
                                      struct iavf_virt_mem *mem)
{
        if (!mem)
                return IAVF_ERR_PARAM;

        /* it's ok to kfree a NULL pointer */
        kfree(mem->va);

        return 0;
}

/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
        if (!(adapter->flags &
              (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
                queue_work(iavf_wq, &adapter->reset_task);
        }
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        adapter->tx_timeout_count++;
        iavf_schedule_reset(adapter);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;

        if (!adapter->msix_entries)
                return;

        wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

        iavf_flush(hw);

        synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;

        wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
                                       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
        wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

        iavf_flush(hw);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
        int i;
        struct iavf_hw *hw = &adapter->hw;

        if (!adapter->msix_entries)
                return;

        for (i = 1; i < adapter->num_msix_vectors; i++) {
                wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
                synchronize_irq(adapter->msix_entries[i].vector);
        }
        iavf_flush(hw);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
        struct iavf_hw *hw = &adapter->hw;
        int i;

        for (i = 1; i < adapter->num_msix_vectors; i++) {
                if (mask & BIT(i - 1)) {
                        wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
                             IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
                             IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
                }
        }
}

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
        struct iavf_hw *hw = &adapter->hw;

        iavf_misc_irq_enable(adapter);
        iavf_irq_enable_queues(adapter, ~0);

        if (flush)
                iavf_flush(hw);
}

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
        struct net_device *netdev = data;
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct iavf_hw *hw = &adapter->hw;

        /* handle non-queue interrupts, these reads clear the registers */
        rd32(hw, IAVF_VFINT_ICR01);
        rd32(hw, IAVF_VFINT_ICR0_ENA1);

        /* schedule work on the private workqueue */
        queue_work(iavf_wq, &adapter->adminq_task);

        return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
        struct iavf_q_vector *q_vector = data;

        if (!q_vector->tx.ring && !q_vector->rx.ring)
                return IRQ_HANDLED;

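        /* We are in hard-IRQ context here, so interrupts are already off and
         * the _irqoff variant of napi_schedule() can skip the redundant
         * local_irq_save()/restore() pair.
         */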
        napi_schedule_irqoff(&q_vector->napi);

        return IRQ_HANDLED;
}

/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
        struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
        struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
        struct iavf_hw *hw = &adapter->hw;

        rx_ring->q_vector = q_vector;
        rx_ring->next = q_vector->rx.ring;
        rx_ring->vsi = &adapter->vsi;
        q_vector->rx.ring = rx_ring;
        q_vector->rx.count++;
        q_vector->rx.next_update = jiffies + 1;
        q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
        q_vector->ring_mask |= BIT(r_idx);
        wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
             q_vector->rx.current_itr >> 1);
        q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
        struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
        struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
        struct iavf_hw *hw = &adapter->hw;

        tx_ring->q_vector = q_vector;
        tx_ring->next = q_vector->tx.ring;
        tx_ring->vsi = &adapter->vsi;
        q_vector->tx.ring = tx_ring;
        q_vector->tx.count++;
        q_vector->tx.next_update = jiffies + 1;
        q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
        q_vector->num_ringpairs++;
        wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
             q_vector->tx.target_itr >> 1);
        q_vector->tx.current_itr = q_vector->tx.target_itr;
}

/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
        int rings_remaining = adapter->num_active_queues;
        int ridx = 0, vidx = 0;
        int q_vectors;

        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (; ridx < rings_remaining; ridx++) {
                iavf_map_vector_to_rxq(adapter, vidx, ridx);
                iavf_map_vector_to_txq(adapter, vidx, ridx);

                /* In the case where we have more queues than vectors, continue
                 * round-robin on vectors until all queues are mapped.
                 */
                if (++vidx >= q_vectors)
                        vidx = 0;
        }

        adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}
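/* A worked example of the round-robin above (hypothetical numbers, not a
 * driver invariant): with 8 active queues and 4 queue vectors, vector 0
 * ends up servicing queue pairs 0 and 4, vector 1 pairs 1 and 5, vector 2
 * pairs 2 and 6, and vector 3 pairs 3 and 7. With at least as many vectors
 * as queues, every pair gets a vector of its own.
 */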

/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
                                     const cpumask_t *mask)
{
        struct iavf_q_vector *q_vector =
                container_of(notify, struct iavf_q_vector, affinity_notify);

        cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}

/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
        unsigned int vector, q_vectors;
        unsigned int rx_int_idx = 0, tx_int_idx = 0;
        int irq_num, err;
        int cpu;

        iavf_irq_disable(adapter);
        /* Decrement for Other and TCP Timer vectors */
        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (vector = 0; vector < q_vectors; vector++) {
                struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

                if (q_vector->tx.ring && q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-TxRx-%d", basename, rx_int_idx++);
                        tx_int_idx++;
                } else if (q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-rx-%d", basename, rx_int_idx++);
                } else if (q_vector->tx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-tx-%d", basename, tx_int_idx++);
                } else {
                        /* skip this unused q_vector */
                        continue;
                }
                err = request_irq(irq_num,
                                  iavf_msix_clean_rings,
                                  0,
                                  q_vector->name,
                                  q_vector);
                if (err) {
                        dev_info(&adapter->pdev->dev,
                                 "Request_irq failed, error: %d\n", err);
                        goto free_queue_irqs;
                }
                /* register for affinity change notifications */
                q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
                q_vector->affinity_notify.release =
                                                   iavf_irq_affinity_release;
                irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
                /* Spread the IRQ affinity hints across online CPUs. Note that
                 * get_cpu_mask returns a mask with a permanent lifetime so
                 * it's safe to use as a hint for irq_set_affinity_hint.
                 */
                cpu = cpumask_local_spread(q_vector->v_idx, -1);
                irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
        }

        return 0;

free_queue_irqs:
        while (vector) {
                vector--;
                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
                irq_set_affinity_notifier(irq_num, NULL);
                irq_set_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &adapter->q_vectors[vector]);
        }
        return err;
}
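/* For illustration: with a (hypothetical) basename of "eth0", the vectors
 * requested above show up in /proc/interrupts as "iavf-eth0-TxRx-0",
 * "iavf-eth0-TxRx-1", ..., since each q_vector normally carries both a Tx
 * and an Rx ring after the mapping step.
 */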

/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err;

        snprintf(adapter->misc_vector_name,
                 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
                 dev_name(&adapter->pdev->dev));
        err = request_irq(adapter->msix_entries[0].vector,
                          &iavf_msix_aq, 0,
                          adapter->misc_vector_name, netdev);
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "request_irq for %s failed: %d\n",
                        adapter->misc_vector_name, err);
                free_irq(adapter->msix_entries[0].vector, netdev);
        }
        return err;
}

/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
        int vector, irq_num, q_vectors;

        if (!adapter->msix_entries)
                return;

        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (vector = 0; vector < q_vectors; vector++) {
                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
                irq_set_affinity_notifier(irq_num, NULL);
                irq_set_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &adapter->q_vectors[vector]);
        }
}

/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        if (!adapter->msix_entries)
                return;

        free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;
        int i;

        for (i = 0; i < adapter->num_active_queues; i++)
                adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
        unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
        struct iavf_hw *hw = &adapter->hw;
        int i;

        /* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
        if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
                struct net_device *netdev = adapter->netdev;

                /* For jumbo frames on systems with 4K pages we have to use
                 * an order 1 page, so we might as well increase the size
                 * of our Rx buffer to make better use of the available space
                 */
                rx_buf_len = IAVF_RXBUFFER_3072;

                /* We use a 1536 buffer size for configurations with
                 * standard Ethernet mtu.  On x86 this gives us enough room
                 * for shared info and 192 bytes of padding.
                 */
                if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
                    (netdev->mtu <= ETH_DATA_LEN))
                        rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
        }
#endif

        for (i = 0; i < adapter->num_active_queues; i++) {
                adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
                adapter->rx_rings[i].rx_buf_len = rx_buf_len;

                if (adapter->flags & IAVF_FLAG_LEGACY_RX)
                        clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
                else
                        set_ring_build_skb_enabled(&adapter->rx_rings[i]);
        }
}
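/* Rx buffer sizing above, by way of example on a 4K-page system: legacy Rx
 * stays at 2048; otherwise the default is 3072 so jumbo frames make good
 * use of an order-1 page, dropping to 1536 - NET_IP_ALIGN when the MTU fits
 * standard Ethernet and a 2K buffer would not be too small with padding.
 */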

/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
{
        struct iavf_vlan_filter *f;

        list_for_each_entry(f, &adapter->vlan_filter_list, list) {
                if (vlan == f->vlan)
                        return f;
        }
        return NULL;
}

/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
{
        struct iavf_vlan_filter *f = NULL;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        f = iavf_find_vlan(adapter, vlan);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
                        goto clearout;

                f->vlan = vlan;

                list_add_tail(&f->list, &adapter->vlan_filter_list);
                f->add = true;
                adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
        }

clearout:
        spin_unlock_bh(&adapter->mac_vlan_list_lock);
        return f;
}

/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
{
        struct iavf_vlan_filter *f;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        f = iavf_find_vlan(adapter, vlan);
        if (f) {
                f->remove = true;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
        }

        spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
                                __always_unused __be16 proto, u16 vid)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        if (!VLAN_ALLOWED(adapter))
                return -EIO;
        if (iavf_add_vlan(adapter, vid) == NULL)
                return -ENOMEM;
        return 0;
}

/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
                                 __always_unused __be16 proto, u16 vid)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        if (VLAN_ALLOWED(adapter)) {
                iavf_del_vlan(adapter, vid);
                return 0;
        }
        return -EIO;
}

/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
                                  const u8 *macaddr)
{
        struct iavf_mac_filter *f;

        if (!macaddr)
                return NULL;

        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                if (ether_addr_equal(macaddr, f->macaddr))
                        return f;
        }
        return NULL;
}

/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
                                        const u8 *macaddr)
{
        struct iavf_mac_filter *f;

        if (!macaddr)
                return NULL;

        f = iavf_find_filter(adapter, macaddr);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
                        return f;

                ether_addr_copy(f->macaddr, macaddr);

                list_add_tail(&f->list, &adapter->mac_filter_list);
                f->add = true;
                adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
        } else {
                f->remove = false;
        }

        return f;
}

/**
 * iavf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_set_mac(struct net_device *netdev, void *p)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct iavf_hw *hw = &adapter->hw;
        struct iavf_mac_filter *f;
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
                return 0;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        f = iavf_find_filter(adapter, hw->mac.addr);
        if (f) {
                f->remove = true;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
        }

        f = iavf_add_filter(adapter, addr->sa_data);

        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        if (f)
                ether_addr_copy(hw->mac.addr, addr->sa_data);

        return (f == NULL) ? -ENOMEM : 0;
}

/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        if (iavf_add_filter(adapter, addr))
                return 0;
        else
                return -ENOMEM;
}

/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct iavf_mac_filter *f;

        /* Under some circumstances, we might receive a request to delete
         * our own device address from our uc list. Because we store the
         * device address in the VSI's MAC/VLAN filter list, we need to ignore
         * such requests and not delete our device address from this list.
         */
        if (ether_addr_equal(addr, netdev->dev_addr))
                return 0;

        f = iavf_find_filter(adapter, addr);
        if (f) {
                f->remove = true;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
        }
        return 0;
}

/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        spin_lock_bh(&adapter->mac_vlan_list_lock);
        __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
        __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        if (netdev->flags & IFF_PROMISC &&
            !(adapter->flags & IAVF_FLAG_PROMISC_ON))
                adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
        else if (!(netdev->flags & IFF_PROMISC) &&
                 adapter->flags & IAVF_FLAG_PROMISC_ON)
                adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

        if (netdev->flags & IFF_ALLMULTI &&
            !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
                adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
        else if (!(netdev->flags & IFF_ALLMULTI) &&
                 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
                adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}

/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
        int q_idx;
        struct iavf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                struct napi_struct *napi;

                q_vector = &adapter->q_vectors[q_idx];
                napi = &q_vector->napi;
                napi_enable(napi);
        }
}

/**
 * iavf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
        int q_idx;
        struct iavf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = &adapter->q_vectors[q_idx];
                napi_disable(&q_vector->napi);
        }
}

/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void iavf_configure(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        iavf_set_rx_mode(netdev);

        iavf_configure_tx(adapter);
        iavf_configure_rx(adapter);
        adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

        for (i = 0; i < adapter->num_active_queues; i++) {
                struct iavf_ring *ring = &adapter->rx_rings[i];

                iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
        }
}

/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
        adapter->state = __IAVF_RUNNING;
        clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

        iavf_napi_enable_all(adapter);

        adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
        if (CLIENT_ENABLED(adapter))
                adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
        mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct iavf_vlan_filter *vlf;
        struct iavf_cloud_filter *cf;
        struct iavf_fdir_fltr *fdir;
        struct iavf_mac_filter *f;
        struct iavf_adv_rss *rss;

        if (adapter->state <= __IAVF_DOWN_PENDING)
                return;

        netif_carrier_off(netdev);
        netif_tx_disable(netdev);
        adapter->link_up = false;
        iavf_napi_disable_all(adapter);
        iavf_irq_disable(adapter);

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        /* clear the sync flag on all filters */
        __dev_uc_unsync(adapter->netdev, NULL);
        __dev_mc_unsync(adapter->netdev, NULL);

        /* remove all MAC filters */
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                f->remove = true;
        }

        /* remove all VLAN filters */
        list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
                vlf->remove = true;
        }

        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        /* remove all cloud filters */
        spin_lock_bh(&adapter->cloud_filter_list_lock);
        list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
                cf->del = true;
        }
        spin_unlock_bh(&adapter->cloud_filter_list_lock);

        /* remove all Flow Director filters */
        spin_lock_bh(&adapter->fdir_fltr_lock);
        list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
                fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
        }
        spin_unlock_bh(&adapter->fdir_fltr_lock);

        /* remove all advanced RSS configuration */
        spin_lock_bh(&adapter->adv_rss_lock);
        list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
                rss->state = IAVF_ADV_RSS_DEL_REQUEST;
        spin_unlock_bh(&adapter->adv_rss_lock);

        if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
            adapter->state != __IAVF_RESETTING) {
                /* cancel any current operation */
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
                /* Schedule operations to close down the HW. Don't wait
                 * here for this to complete. The watchdog is still running
                 * and it will take care of this.
                 */
                adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
                adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
        }

        mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
        int err, vector_threshold;

        /* We'll want at least 3 (vector_threshold):
         * 0) Other (Admin Queue and link, mostly)
         * 1) TxQ[0] Cleanup
         * 2) RxQ[0] Cleanup
         */
        vector_threshold = MIN_MSIX_COUNT;

        /* The more we get, the more we will assign to Tx/Rx Cleanup
         * for the separate queues...where Rx Cleanup >= Tx Cleanup.
         * Right now, we simply care about how many we'll get; we'll
         * set them up later while requesting irq's.
         */
        err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
                                    vector_threshold, vectors);
        if (err < 0) {
                dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
                return err;
        }

        /* Adjust for only the vectors we'll use, which is minimum
         * of max_msix_q_vectors + NONQ_VECS, or the number of
         * vectors we were allocated.
         */
        adapter->num_msix_vectors = err;
        return 0;
}
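/* pci_enable_msix_range() returns the number of vectors actually granted,
 * anywhere between the minimum (vector_threshold) and the requested count,
 * or a negative errno. So asking for, say, 9 vectors when only 5 are
 * available still succeeds with err == 5, and num_msix_vectors records the
 * smaller grant.
 */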

/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void iavf_free_queues(struct iavf_adapter *adapter)
{
        if (!adapter->vsi_res)
                return;
        adapter->num_active_queues = 0;
        kfree(adapter->tx_rings);
        adapter->tx_rings = NULL;
        kfree(adapter->rx_rings);
        adapter->rx_rings = NULL;
}

/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
        int i, num_active_queues;

        /* If we're in reset reallocating queues we don't actually know yet for
         * certain the PF gave us the number of queues we asked for but we'll
         * assume it did.  Once basic reset is finished we'll confirm once we
         * start negotiating config with PF.
         */
        if (adapter->num_req_queues)
                num_active_queues = adapter->num_req_queues;
        else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
                 adapter->num_tc)
                num_active_queues = adapter->ch_config.total_qps;
        else
                num_active_queues = min_t(int,
                                          adapter->vsi_res->num_queue_pairs,
                                          (int)(num_online_cpus()));

        adapter->tx_rings = kcalloc(num_active_queues,
                                    sizeof(struct iavf_ring), GFP_KERNEL);
        if (!adapter->tx_rings)
                goto err_out;
        adapter->rx_rings = kcalloc(num_active_queues,
                                    sizeof(struct iavf_ring), GFP_KERNEL);
        if (!adapter->rx_rings)
                goto err_out;

        for (i = 0; i < num_active_queues; i++) {
                struct iavf_ring *tx_ring;
                struct iavf_ring *rx_ring;

                tx_ring = &adapter->tx_rings[i];

                tx_ring->queue_index = i;
                tx_ring->netdev = adapter->netdev;
                tx_ring->dev = &adapter->pdev->dev;
                tx_ring->count = adapter->tx_desc_count;
                tx_ring->itr_setting = IAVF_ITR_TX_DEF;
                if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
                        tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

                rx_ring = &adapter->rx_rings[i];
                rx_ring->queue_index = i;
                rx_ring->netdev = adapter->netdev;
                rx_ring->dev = &adapter->pdev->dev;
                rx_ring->count = adapter->rx_desc_count;
                rx_ring->itr_setting = IAVF_ITR_RX_DEF;
        }

        adapter->num_active_queues = num_active_queues;

        return 0;

err_out:
        iavf_free_queues(adapter);
        return -ENOMEM;
}
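/* Queue-count selection above, with illustrative numbers: absent an explicit
 * request and with no ADq TCs, a PF offering 16 queue pairs on a host with
 * 8 online CPUs yields 8 active queues, since more queues than CPUs rarely
 * buys a VF anything.
 */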

/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
        int vector, v_budget;
        int pairs = 0;
        int err = 0;

        if (!adapter->vsi_res) {
                err = -EIO;
                goto out;
        }
        pairs = adapter->num_active_queues;

        /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
         * us much good if we have more vectors than CPUs. However, we already
         * limit the total number of queues by the number of CPUs so we do not
         * need any further limiting here.
         */
        v_budget = min_t(int, pairs + NONQ_VECS,
                         (int)adapter->vf_res->max_vectors);

        adapter->msix_entries = kcalloc(v_budget,
                                        sizeof(struct msix_entry), GFP_KERNEL);
        if (!adapter->msix_entries) {
                err = -ENOMEM;
                goto out;
        }

        for (vector = 0; vector < v_budget; vector++)
                adapter->msix_entries[vector].entry = vector;

        err = iavf_acquire_msix_vectors(adapter, v_budget);

out:
        netif_set_real_num_rx_queues(adapter->netdev, pairs);
        netif_set_real_num_tx_queues(adapter->netdev, pairs);
        return err;
}
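/* Example of the budget math above (illustrative values): 8 queue pairs plus
 * NONQ_VECS asks for 9 vectors; if the PF caps max_vectors at 5, v_budget is
 * 5, vector 0 stays with the admin queue, and the 8 queue pairs share the
 * remaining 4 vectors via the round-robin mapping shown earlier.
 */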

/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
        struct iavf_aqc_get_set_rss_key_data *rss_key =
                (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
        struct iavf_hw *hw = &adapter->hw;
        int ret = 0;

        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
                        adapter->current_op);
                return -EBUSY;
        }

        ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
        if (ret) {
                dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
                        iavf_stat_str(hw, ret),
                        iavf_aq_str(hw, hw->aq.asq_last_status));
                return ret;
        }

        ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
                                  adapter->rss_lut, adapter->rss_lut_size);
        if (ret) {
                dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
                        iavf_stat_str(hw, ret),
                        iavf_aq_str(hw, hw->aq.asq_last_status));
        }

        return ret;
}

/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;
        u32 *dw;
        u16 i;

        /* Write one dword per register. The key and lut buffers are exactly
         * rss_key_size and rss_lut_size bytes, so iterating with '<' (not
         * '<=') avoids reading one dword past the end of each buffer.
         */
        dw = (u32 *)adapter->rss_key;
        for (i = 0; i < adapter->rss_key_size / 4; i++)
                wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

        dw = (u32 *)adapter->rss_lut;
        for (i = 0; i < adapter->rss_lut_size / 4; i++)
                wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

        iavf_flush(hw);

        return 0;
}

/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_config_rss(struct iavf_adapter *adapter)
{
        if (RSS_PF(adapter)) {
                adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
                                        IAVF_FLAG_AQ_SET_RSS_KEY;
                return 0;
        } else if (RSS_AQ(adapter)) {
                return iavf_config_rss_aq(adapter);
        } else {
                return iavf_config_rss_reg(adapter);
        }
}
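/* Of the three paths above: a PF advertising RSS_PF owns the RSS config and
 * is asked via the aq_required flags, RSS_AQ-capable devices take the
 * key/lut over the admin queue, and the oldest parts fall back to the VF
 * writing the VFQF registers directly.
 */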

/**
 * iavf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
        u16 i;

        for (i = 0; i < adapter->rss_lut_size; i++)
                adapter->rss_lut[i] = i % adapter->num_active_queues;
}
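/* E.g. a 64-entry LUT with 4 active queues becomes 0,1,2,3,0,1,2,3,...
 * so RSS hash buckets spread evenly across the queues.
 */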

/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_init_rss(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;
        int ret;

        if (!RSS_PF(adapter)) {
                /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
                if (adapter->vf_res->vf_cap_flags &
                    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
                        adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
                else
                        adapter->hena = IAVF_DEFAULT_RSS_HENA;

                wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
                wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
        }

        iavf_fill_rss_lut(adapter);
        netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
        ret = iavf_config_rss(adapter);

        return ret;
}

/**
 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
        int q_idx = 0, num_q_vectors;
        struct iavf_q_vector *q_vector;

        num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
        adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
                                     GFP_KERNEL);
        if (!adapter->q_vectors)
                return -ENOMEM;

        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                q_vector = &adapter->q_vectors[q_idx];
                q_vector->adapter = adapter;
                q_vector->vsi = &adapter->vsi;
                q_vector->v_idx = q_idx;
                q_vector->reg_idx = q_idx;
                cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
                netif_napi_add(adapter->netdev, &q_vector->napi,
                               iavf_napi_poll, NAPI_POLL_WEIGHT);
        }

        return 0;
}

/**
 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
        int q_idx, num_q_vectors;
        int napi_vectors;

        if (!adapter->q_vectors)
                return;

        num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
        napi_vectors = adapter->num_active_queues;

        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];

                if (q_idx < napi_vectors)
                        netif_napi_del(&q_vector->napi);
        }
        kfree(adapter->q_vectors);
        adapter->q_vectors = NULL;
}

/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
        if (!adapter->msix_entries)
                return;

        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
}

/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
        int err;

        err = iavf_alloc_queues(adapter);
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "Unable to allocate memory for queues\n");
                goto err_alloc_queues;
        }

        rtnl_lock();
        err = iavf_set_interrupt_capability(adapter);
        rtnl_unlock();
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "Unable to setup interrupt capabilities\n");
                goto err_set_interrupt;
        }

        err = iavf_alloc_q_vectors(adapter);
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "Unable to allocate memory for queue vectors\n");
                goto err_alloc_q_vectors;
        }

        /* If we've made it this far with the ADq flag on, then we haven't
         * bailed out anywhere in the middle, and ADq isn't just enabled but
         * its resources have actually been allocated in the reset path.
         * Now we can truly claim that ADq is enabled.
         */
        if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
            adapter->num_tc)
                dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created\n",
                         adapter->num_tc);

        dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u\n",
                 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
                 adapter->num_active_queues);

        return 0;
err_alloc_q_vectors:
        iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
        iavf_free_queues(adapter);
err_alloc_queues:
        return err;
}

/**
 * iavf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void iavf_free_rss(struct iavf_adapter *adapter)
{
        kfree(adapter->rss_key);
        adapter->rss_key = NULL;

        kfree(adapter->rss_lut);
        adapter->rss_lut = NULL;
}

/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err;

        if (netif_running(netdev))
                iavf_free_traffic_irqs(adapter);
        iavf_free_misc_irq(adapter);
        iavf_reset_interrupt_capability(adapter);
        iavf_free_q_vectors(adapter);
        iavf_free_queues(adapter);

        err = iavf_init_interrupt_scheme(adapter);
        if (err)
                goto err;

        netif_tx_stop_all_queues(netdev);

        err = iavf_request_misc_irq(adapter);
        if (err)
                goto err;

        set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

        iavf_map_rings_to_vectors(adapter);

        if (RSS_AQ(adapter))
                adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
        else
                err = iavf_init_rss(adapter);
err:
        return err;
}

/**
 * iavf_process_aq_command - process aq_required flags and send an aq command
 * @adapter: pointer to iavf adapter structure
 *
 * Returns 0 on success
 * Returns error code if no command was sent
 * or error code if the command failed.
 **/
1527 static int iavf_process_aq_command(struct iavf_adapter *adapter)
1528 {
1529         if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
1530                 return iavf_send_vf_config_msg(adapter);
1531         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
1532                 iavf_disable_queues(adapter);
1533                 return 0;
1534         }
1535
1536         if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
1537                 iavf_map_queues(adapter);
1538                 return 0;
1539         }
1540
1541         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
1542                 iavf_add_ether_addrs(adapter);
1543                 return 0;
1544         }
1545
1546         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
1547                 iavf_add_vlans(adapter);
1548                 return 0;
1549         }
1550
1551         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
1552                 iavf_del_ether_addrs(adapter);
1553                 return 0;
1554         }
1555
1556         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
1557                 iavf_del_vlans(adapter);
1558                 return 0;
1559         }
1560
1561         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
1562                 iavf_enable_vlan_stripping(adapter);
1563                 return 0;
1564         }
1565
1566         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
1567                 iavf_disable_vlan_stripping(adapter);
1568                 return 0;
1569         }
1570
1571         if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
1572                 iavf_configure_queues(adapter);
1573                 return 0;
1574         }
1575
1576         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
1577                 iavf_enable_queues(adapter);
1578                 return 0;
1579         }
1580
1581         if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
1582                 /* This message goes straight to the firmware, not the
1583                  * PF, so we don't have to set current_op as we will
1584                  * not get a response through the ARQ.
1585                  */
                     iavf_init_rss(adapter);
1586                 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
1587                 return 0;
1588         }
1589         if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
1590                 iavf_get_hena(adapter);
1591                 return 0;
1592         }
1593         if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
1594                 iavf_set_hena(adapter);
1595                 return 0;
1596         }
1597         if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
1598                 iavf_set_rss_key(adapter);
1599                 return 0;
1600         }
1601         if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
1602                 iavf_set_rss_lut(adapter);
1603                 return 0;
1604         }
1605
1606         if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
1607                 iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
1608                                        FLAG_VF_MULTICAST_PROMISC);
1609                 return 0;
1610         }
1611
1612         if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
1613                 iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
1614                 return 0;
1615         }
1616
1617         if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
1618             (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
1619                 iavf_set_promiscuous(adapter, 0);
1620                 return 0;
1621         }
1622
1623         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
1624                 iavf_enable_channels(adapter);
1625                 return 0;
1626         }
1627
1628         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
1629                 iavf_disable_channels(adapter);
1630                 return 0;
1631         }
1632         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
1633                 iavf_add_cloud_filter(adapter);
1634                 return 0;
1635         }
1636
1637         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
1638                 iavf_del_cloud_filter(adapter);
1639                 return 0;
1640         }
1649         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
1650                 iavf_add_fdir_filter(adapter);
1651                 return 0;
1652         }
1653         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
1654                 iavf_del_fdir_filter(adapter);
1655                 return 0;
1656         }
1657         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
1658                 iavf_add_adv_rss_cfg(adapter);
1659                 return 0;
1660         }
1661         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
1662                 iavf_del_adv_rss_cfg(adapter);
1663                 return 0;
1664         }
1665         return -EAGAIN;
1666 }
1667
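     /* Driver bring-up is a three-step state machine handled below:
      * __IAVF_STARTUP -> __IAVF_INIT_VERSION_CHECK ->
      * __IAVF_INIT_GET_RESOURCES -> __IAVF_DOWN. Each helper advances
      * adapter->state only on success, so the init task can simply retry
      * the current step after a failure.
      */
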
1668 /**
1669  * iavf_startup - first step of driver startup
1670  * @adapter: board private structure
1671  *
1672  * Function processes the __IAVF_STARTUP driver state.
1673  * On success the state is changed to __IAVF_INIT_VERSION_CHECK;
1674  * on failure it returns -EAGAIN.
1675  **/
1676 static int iavf_startup(struct iavf_adapter *adapter)
1677 {
1678         struct pci_dev *pdev = adapter->pdev;
1679         struct iavf_hw *hw = &adapter->hw;
1680         int err;
1681
1682         WARN_ON(adapter->state != __IAVF_STARTUP);
1683
1684         /* driver loaded, probe complete */
1685         adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
1686         adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
1687         err = iavf_set_mac_type(hw);
1688         if (err) {
1689                 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
1690                 goto err;
1691         }
1692
1693         err = iavf_check_reset_complete(hw);
1694         if (err) {
1695                 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
1696                          err);
1697                 goto err;
1698         }
1699         hw->aq.num_arq_entries = IAVF_AQ_LEN;
1700         hw->aq.num_asq_entries = IAVF_AQ_LEN;
1701         hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
1702         hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
1703
1704         err = iavf_init_adminq(hw);
1705         if (err) {
1706                 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
1707                 goto err;
1708         }
1709         err = iavf_send_api_ver(adapter);
1710         if (err) {
1711                 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
1712                 iavf_shutdown_adminq(hw);
1713                 goto err;
1714         }
1715         adapter->state = __IAVF_INIT_VERSION_CHECK;
1716 err:
1717         return err;
1718 }
1719
1720 /**
1721  * iavf_init_version_check - second step of driver startup
1722  * @adapter: board private structure
1723  *
1724  * Function processes the __IAVF_INIT_VERSION_CHECK driver state.
1725  * On success the state is changed to __IAVF_INIT_GET_RESOURCES;
1726  * on failure it returns -EAGAIN.
1727  **/
1728 static int iavf_init_version_check(struct iavf_adapter *adapter)
1729 {
1730         struct pci_dev *pdev = adapter->pdev;
1731         struct iavf_hw *hw = &adapter->hw;
1732         int err = -EAGAIN;
1733
1734         WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
1735
1736         if (!iavf_asq_done(hw)) {
1737                 dev_err(&pdev->dev, "Admin queue command never completed\n");
1738                 iavf_shutdown_adminq(hw);
1739                 adapter->state = __IAVF_STARTUP;
1740                 goto err;
1741         }
1742
1743         /* aq msg sent, awaiting reply */
1744         err = iavf_verify_api_ver(adapter);
1745         if (err) {
1746                 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
1747                         err = iavf_send_api_ver(adapter);
1748                 else
1749                         dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
1750                                 adapter->pf_version.major,
1751                                 adapter->pf_version.minor,
1752                                 VIRTCHNL_VERSION_MAJOR,
1753                                 VIRTCHNL_VERSION_MINOR);
1754                 goto err;
1755         }
1756         err = iavf_send_vf_config_msg(adapter);
1757         if (err) {
1758                 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
1759                         err);
1760                 goto err;
1761         }
1762         adapter->state = __IAVF_INIT_GET_RESOURCES;
1763
1764 err:
1765         return err;
1766 }
1767
1768 /**
1769  * iavf_init_get_resources - third step of driver startup
1770  * @adapter: board private structure
1771  *
1772  * Function processes the __IAVF_INIT_GET_RESOURCES driver state and
1773  * finishes the driver initialization procedure.
1774  * On success the state is changed to __IAVF_DOWN;
1775  * on failure it returns -EAGAIN.
1776  **/
1777 static int iavf_init_get_resources(struct iavf_adapter *adapter)
1778 {
1779         struct net_device *netdev = adapter->netdev;
1780         struct pci_dev *pdev = adapter->pdev;
1781         struct iavf_hw *hw = &adapter->hw;
1782         int err;
1783
1784         WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
1785         /* aq msg sent, awaiting reply */
1786         if (!adapter->vf_res) {
1787                 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
1788                                           GFP_KERNEL);
1789                 if (!adapter->vf_res) {
1790                         err = -ENOMEM;
1791                         goto err;
1792                 }
1793         }
1794         err = iavf_get_vf_config(adapter);
1795         if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
1796                 err = iavf_send_vf_config_msg(adapter);
1797                 goto err;
1798         } else if (err == IAVF_ERR_PARAM) {
1799                 /* We only get ERR_PARAM if the device is in a very bad
1800                  * state or if we've been disabled for previous bad
1801                  * behavior. Either way, we're done now.
1802                  */
1803                 iavf_shutdown_adminq(hw);
1804                 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
1805                 return 0;
1806         }
1807         if (err) {
1808                 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
1809                 goto err_alloc;
1810         }
1811
1812         err = iavf_process_config(adapter);
1813         if (err)
1814                 goto err_alloc;
1815         adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1816
1817         adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
1818
1819         netdev->netdev_ops = &iavf_netdev_ops;
1820         iavf_set_ethtool_ops(netdev);
1821         netdev->watchdog_timeo = 5 * HZ;
1822
1823         /* MTU range: 68 - 9710 */
1824         netdev->min_mtu = ETH_MIN_MTU;
1825         netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
1826
1827         if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
1828                 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
1829                          adapter->hw.mac.addr);
1830                 eth_hw_addr_random(netdev);
1831                 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
1832         } else {
1833                 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
1834                 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
1835         }
1836
1837         adapter->tx_desc_count = IAVF_DEFAULT_TXD;
1838         adapter->rx_desc_count = IAVF_DEFAULT_RXD;
1839         err = iavf_init_interrupt_scheme(adapter);
1840         if (err)
1841                 goto err_sw_init;
1842         iavf_map_rings_to_vectors(adapter);
1843         if (adapter->vf_res->vf_cap_flags &
1844                 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1845                 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
1846
1847         err = iavf_request_misc_irq(adapter);
1848         if (err)
1849                 goto err_sw_init;
1850
1851         netif_carrier_off(netdev);
1852         adapter->link_up = false;
1853
1854         /* Hold the rtnl lock: no callbacks may run between device
1855          * registration and the point where the driver state is set
1856          * to __IAVF_DOWN
1857         rtnl_lock();
1858         if (!adapter->netdev_registered) {
1859                 err = register_netdevice(netdev);
1860                 if (err) {
1861                         rtnl_unlock();
1862                         goto err_register;
1863                 }
1864         }
1865
1866         adapter->netdev_registered = true;
1867
1868         netif_tx_stop_all_queues(netdev);
1869         if (CLIENT_ALLOWED(adapter)) {
1870                 err = iavf_lan_add_device(adapter);
1871                 if (err)
1872                         dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
1873                                  err);
1874         }
1875         dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
1876         if (netdev->features & NETIF_F_GRO)
1877                 dev_info(&pdev->dev, "GRO is enabled\n");
1878
1879         adapter->state = __IAVF_DOWN;
1880         set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1881         rtnl_unlock();
1882
1883         iavf_misc_irq_enable(adapter);
1884         wake_up(&adapter->down_waitqueue);
1885
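             /* rss_key_size and rss_lut_size were chosen earlier, in
              * iavf_process_config(), from PF-advertised values when
              * available and from driver defaults otherwise.
              */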
1886         adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
1887         adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
1888         if (!adapter->rss_key || !adapter->rss_lut) {
1889                 err = -ENOMEM;
1890                 goto err_mem;
1891         }
1892         if (RSS_AQ(adapter))
1893                 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
1894         else
1895                 iavf_init_rss(adapter);
1896
1897         return err;
1898 err_mem:
1899         iavf_free_rss(adapter);
1900 err_register:
1901         iavf_free_misc_irq(adapter);
1902 err_sw_init:
1903         iavf_reset_interrupt_capability(adapter);
1904 err_alloc:
1905         kfree(adapter->vf_res);
1906         adapter->vf_res = NULL;
1907 err:
1908         return err;
1909 }
1910
1911 /**
1912  * iavf_watchdog_task - Periodic call-back task
1913  * @work: pointer to work_struct
1914  **/
1915 static void iavf_watchdog_task(struct work_struct *work)
1916 {
1917         struct iavf_adapter *adapter = container_of(work,
1918                                                     struct iavf_adapter,
1919                                                     watchdog_task.work);
1920         struct iavf_hw *hw = &adapter->hw;
1921         u32 reg_val;
1922
1923         if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
1924                 goto restart_watchdog;
1925
1926         if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
1927                 adapter->state = __IAVF_COMM_FAILED;
1928
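             /* Dispatch on driver state; any branch that returns directly
              * must clear the critical-section bit itself.
              */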
1929         switch (adapter->state) {
1930         case __IAVF_COMM_FAILED:
1931                 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
1932                           IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
1933                 if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
1934                     reg_val == VIRTCHNL_VFR_COMPLETED) {
1935                         /* A chance for redemption! */
1936                         dev_err(&adapter->pdev->dev,
1937                                 "Hardware came out of reset. Attempting reinit.\n");
1938                         adapter->state = __IAVF_STARTUP;
1939                         adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
1940                         queue_delayed_work(iavf_wq, &adapter->init_task, 10);
1941                         clear_bit(__IAVF_IN_CRITICAL_TASK,
1942                                   &adapter->crit_section);
1943                         /* Don't reschedule the watchdog, since we've restarted
1944                          * the init task. When init_task contacts the PF and
1945                          * gets everything set up again, it'll restart the
1946                          * watchdog for us. Down, boy. Sit. Stay. Woof.
1947                          */
1948                         return;
1949                 }
1950                 adapter->aq_required = 0;
1951                 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1952                 clear_bit(__IAVF_IN_CRITICAL_TASK,
1953                           &adapter->crit_section);
1954                 queue_delayed_work(iavf_wq,
1955                                    &adapter->watchdog_task,
1956                                    msecs_to_jiffies(10));
1957                 goto watchdog_done;
1958         case __IAVF_RESETTING:
1959                 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
1960                 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
1961                 return;
1962         case __IAVF_DOWN:
1963         case __IAVF_DOWN_PENDING:
1964         case __IAVF_TESTING:
1965         case __IAVF_RUNNING:
1966                 if (adapter->current_op) {
1967                         if (!iavf_asq_done(hw)) {
1968                                 dev_dbg(&adapter->pdev->dev,
1969                                         "Admin queue timeout\n");
1970                                 iavf_send_api_ver(adapter);
1971                         }
1972                 } else {
1973                         /* An error will be returned if no commands were
1974                          * processed; use this opportunity to update stats
1975                          */
1976                         if (iavf_process_aq_command(adapter) &&
1977                             adapter->state == __IAVF_RUNNING)
1978                                 iavf_request_stats(adapter);
1979                 }
1980                 break;
1981         case __IAVF_REMOVE:
1982                 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
1983                 return;
1984         default:
1985                 goto restart_watchdog;
1986         }
1987
1988         /* check for hw reset */
1989         reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
1990         if (!reg_val) {
1991                 adapter->state = __IAVF_RESETTING;
1992                 adapter->flags |= IAVF_FLAG_RESET_PENDING;
1993                 adapter->aq_required = 0;
1994                 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1995                 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
1996                 queue_work(iavf_wq, &adapter->reset_task);
1997                 goto watchdog_done;
1998         }
1999
2000         schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
2001 watchdog_done:
2002         if (adapter->state == __IAVF_RUNNING ||
2003             adapter->state == __IAVF_COMM_FAILED)
2004                 iavf_detect_recover_hung(&adapter->vsi);
2005         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2006 restart_watchdog:
2007         if (adapter->aq_required)
2008                 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2009                                    msecs_to_jiffies(20));
2010         else
2011                 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
2012         queue_work(iavf_wq, &adapter->adminq_task);
2013 }
2014
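     /**
      * iavf_disable_vf - give up on a reset and disable the VF
      * @adapter: board private structure
      *
      * Called when a reset never completes; releases filters, queues and
      * interrupts and leaves the adapter in the __IAVF_DOWN state.
      **/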
2015 static void iavf_disable_vf(struct iavf_adapter *adapter)
2016 {
2017         struct iavf_mac_filter *f, *ftmp;
2018         struct iavf_vlan_filter *fv, *fvtmp;
2019         struct iavf_cloud_filter *cf, *cftmp;
2020
2021         adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2022
2023         /* We don't use netif_running() because it may be true prior to
2024          * ndo_open() returning, so we can't assume it means all our open
2025          * tasks have finished, since we're not holding the rtnl_lock here.
2026          */
2027         if (adapter->state == __IAVF_RUNNING) {
2028                 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2029                 netif_carrier_off(adapter->netdev);
2030                 netif_tx_disable(adapter->netdev);
2031                 adapter->link_up = false;
2032                 iavf_napi_disable_all(adapter);
2033                 iavf_irq_disable(adapter);
2034                 iavf_free_traffic_irqs(adapter);
2035                 iavf_free_all_tx_resources(adapter);
2036                 iavf_free_all_rx_resources(adapter);
2037         }
2038
2039         spin_lock_bh(&adapter->mac_vlan_list_lock);
2040
2041         /* Delete all of the filters */
2042         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2043                 list_del(&f->list);
2044                 kfree(f);
2045         }
2046
2047         list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
2048                 list_del(&fv->list);
2049                 kfree(fv);
2050         }
2051
2052         spin_unlock_bh(&adapter->mac_vlan_list_lock);
2053
2054         spin_lock_bh(&adapter->cloud_filter_list_lock);
2055         list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
2056                 list_del(&cf->list);
2057                 kfree(cf);
2058                 adapter->num_cloud_filters--;
2059         }
2060         spin_unlock_bh(&adapter->cloud_filter_list_lock);
2061
2062         iavf_free_misc_irq(adapter);
2063         iavf_reset_interrupt_capability(adapter);
2064         iavf_free_queues(adapter);
2065         iavf_free_q_vectors(adapter);
2066         memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
2067         iavf_shutdown_adminq(&adapter->hw);
2068         adapter->netdev->flags &= ~IFF_UP;
2069         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2070         adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2071         adapter->state = __IAVF_DOWN;
2072         wake_up(&adapter->down_waitqueue);
2073         dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
2074 }
2075
2076 /**
2077  * iavf_reset_task - Call-back task to handle hardware reset
2078  * @work: pointer to work_struct
2079  *
2080  * During reset we need to shut down and reinitialize the admin queue
2081  * before we can use it to communicate with the PF again. We also clear
2082  * and reinit the rings because that context is lost as well.
2083  **/
2084 static void iavf_reset_task(struct work_struct *work)
2085 {
2086         struct iavf_adapter *adapter = container_of(work,
2087                                                       struct iavf_adapter,
2088                                                       reset_task);
2089         struct virtchnl_vf_resource *vfres = adapter->vf_res;
2090         struct net_device *netdev = adapter->netdev;
2091         struct iavf_hw *hw = &adapter->hw;
2092         struct iavf_mac_filter *f, *ftmp;
2093         struct iavf_vlan_filter *vlf;
2094         struct iavf_cloud_filter *cf;
2095         u32 reg_val;
2096         int i = 0, err;
2097         bool running;
2098
2099         /* When device is being removed it doesn't make sense to run the reset
2100          * task, just return in such a case.
2101          */
2102         if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
2103                 return;
2104
2105         while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
2106                                 &adapter->crit_section))
2107                 usleep_range(500, 1000);
2108         if (CLIENT_ENABLED(adapter)) {
2109                 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
2110                                     IAVF_FLAG_CLIENT_NEEDS_CLOSE |
2111                                     IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
2112                                     IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
2113                 cancel_delayed_work_sync(&adapter->client_task);
2114                 iavf_notify_client_close(&adapter->vsi, true);
2115         }
2116         iavf_misc_irq_disable(adapter);
2117         if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2118                 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
2119                 /* Restart the AQ here. If we have been reset but didn't
2120                  * detect it, or if the PF had to reinit, our AQ will be hosed.
2121                  */
2122                 iavf_shutdown_adminq(hw);
2123                 iavf_init_adminq(hw);
2124                 iavf_request_reset(adapter);
2125         }
2126         adapter->flags |= IAVF_FLAG_RESET_PENDING;
2127
2128         /* poll until we see the reset actually happen */
2129         for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
2130                 reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
2131                           IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2132                 if (!reg_val)
2133                         break;
2134                 usleep_range(5000, 10000);
2135         }
2136         if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
2137                 dev_info(&adapter->pdev->dev, "Never saw reset\n");
2138                 goto continue_reset; /* act like the reset happened */
2139         }
2140
2141         /* wait until the reset is complete and the PF is responding to us */
2142         for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
2143                 /* sleep first to make sure a minimum wait time is met */
2144                 msleep(IAVF_RESET_WAIT_MS);
2145
2146                 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2147                           IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2148                 if (reg_val == VIRTCHNL_VFR_VFACTIVE)
2149                         break;
2150         }
2151
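             /* The reset may have cleared bus mastering, so restore it
              * before touching the device again.
              */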
2152         pci_set_master(adapter->pdev);
2153
2154         if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
2155                 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
2156                         reg_val);
2157                 iavf_disable_vf(adapter);
2158                 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
2159                 return; /* Do not attempt to reinit. It's dead, Jim. */
2160         }
2161
2162 continue_reset:
2163         /* We don't use netif_running() because it may be true prior to
2164          * ndo_open() returning, so we can't assume it means all our open
2165          * tasks have finished, since we're not holding the rtnl_lock here.
2166          */
2167         running = ((adapter->state == __IAVF_RUNNING) ||
2168                    (adapter->state == __IAVF_RESETTING));
2169
2170         if (running) {
2171                 netif_carrier_off(netdev);
2172                 netif_tx_stop_all_queues(netdev);
2173                 adapter->link_up = false;
2174                 iavf_napi_disable_all(adapter);
2175         }
2176         iavf_irq_disable(adapter);
2177
2178         adapter->state = __IAVF_RESETTING;
2179         adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2180
2181         /* free the Tx/Rx rings and descriptors, might be better to just
2182          * re-use them sometime in the future
2183          */
2184         iavf_free_all_rx_resources(adapter);
2185         iavf_free_all_tx_resources(adapter);
2186
2187         adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
2188         /* kill and reinit the admin queue */
2189         iavf_shutdown_adminq(hw);
2190         adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2191         err = iavf_init_adminq(hw);
2192         if (err)
2193                 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
2194                          err);
2195         adapter->aq_required = 0;
2196
2197         if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
2198                 err = iavf_reinit_interrupt_scheme(adapter);
2199                 if (err)
2200                         goto reset_err;
2201         }
2202
2203         adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
2204         adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
2205
2206         spin_lock_bh(&adapter->mac_vlan_list_lock);
2207
2208         /* Delete filter for the current MAC address, it could have
2209          * been changed by the PF via administratively set MAC.
2210          * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
2211          */
2212         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2213                 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
2214                         list_del(&f->list);
2215                         kfree(f);
2216                 }
2217         }
2218         /* re-add all MAC filters */
2219         list_for_each_entry(f, &adapter->mac_filter_list, list) {
2220                 f->add = true;
2221         }
2222         /* re-add all VLAN filters */
2223         list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
2224                 vlf->add = true;
2225         }
2226
2227         spin_unlock_bh(&adapter->mac_vlan_list_lock);
2228
2229         /* check if TCs are running and re-add all cloud filters */
2230         spin_lock_bh(&adapter->cloud_filter_list_lock);
2231         if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
2232             adapter->num_tc) {
2233                 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
2234                         cf->add = true;
2235                 }
2236         }
2237         spin_unlock_bh(&adapter->cloud_filter_list_lock);
2238
2239         adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
2240         adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2241         adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
2242         iavf_misc_irq_enable(adapter);
2243
2244         mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
2245
2246         /* We were running when the reset started, so we need to restore some
2247          * state here.
2248          */
2249         if (running) {
2250                 /* allocate transmit descriptors */
2251                 err = iavf_setup_all_tx_resources(adapter);
2252                 if (err)
2253                         goto reset_err;
2254
2255                 /* allocate receive descriptors */
2256                 err = iavf_setup_all_rx_resources(adapter);
2257                 if (err)
2258                         goto reset_err;
2259
2260                 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
2261                         err = iavf_request_traffic_irqs(adapter, netdev->name);
2262                         if (err)
2263                                 goto reset_err;
2264
2265                         adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2266                 }
2267
2268                 iavf_configure(adapter);
2269
2270                 iavf_up_complete(adapter);
2271
2272                 iavf_irq_enable(adapter, true);
2273         } else {
2274                 adapter->state = __IAVF_DOWN;
2275                 wake_up(&adapter->down_waitqueue);
2276         }
2277         clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
2278         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2279
2280         return;
2281 reset_err:
2282         clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
2283         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2284         dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
2285         iavf_close(netdev);
2286 }
2287
2288 /**
2289  * iavf_adminq_task - worker thread to clean the admin queue
2290  * @work: pointer to work_struct containing our data
2291  **/
2292 static void iavf_adminq_task(struct work_struct *work)
2293 {
2294         struct iavf_adapter *adapter =
2295                 container_of(work, struct iavf_adapter, adminq_task);
2296         struct iavf_hw *hw = &adapter->hw;
2297         struct iavf_arq_event_info event;
2298         enum virtchnl_ops v_op;
2299         enum iavf_status ret, v_ret;
2300         u32 val, oldval;
2301         u16 pending;
2302
2303         if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2304                 goto out;
2305
2306         event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
2307         event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
2308         if (!event.msg_buf)
2309                 goto out;
2310
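             /* Drain the ARQ: each event encodes the virtchnl opcode in
              * cookie_high and its return status in cookie_low.
              */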
2311         do {
2312                 ret = iavf_clean_arq_element(hw, &event, &pending);
2313                 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
2314                 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
2315
2316                 if (ret || !v_op)
2317                         break; /* No event to process or error cleaning ARQ */
2318
2319                 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
2320                                          event.msg_len);
2321                 if (pending != 0)
2322                         memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
2323         } while (pending);
2324
2325         if ((adapter->flags &
2326              (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
2327             adapter->state == __IAVF_RESETTING)
2328                 goto freedom;
2329
2330         /* check for error indications */
2331         val = rd32(hw, hw->aq.arq.len);
2332         if (val == 0xdeadbeef) /* indicates device in reset */
2333                 goto freedom;
2334         oldval = val;
2335         if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
2336                 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
2337                 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
2338         }
2339         if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
2340                 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
2341                 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
2342         }
2343         if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
2344                 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
2345                 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
2346         }
2347         if (oldval != val)
2348                 wr32(hw, hw->aq.arq.len, val);
2349
2350         val = rd32(hw, hw->aq.asq.len);
2351         oldval = val;
2352         if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
2353                 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
2354                 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
2355         }
2356         if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
2357                 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
2358                 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
2359         }
2360         if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
2361                 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
2362                 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
2363         }
2364         if (oldval != val)
2365                 wr32(hw, hw->aq.asq.len, val);
2366
2367 freedom:
2368         kfree(event.msg_buf);
2369 out:
2370         /* re-enable Admin queue interrupt cause */
2371         iavf_misc_irq_enable(adapter);
2372 }
2373
2374 /**
2375  * iavf_client_task - worker thread to perform client work
2376  * @work: pointer to work_struct containing our data
2377  *
2378  * This task handles client interactions. Because client calls can be
2379  * reentrant, we can't handle them in the watchdog.
2380  **/
2381 static void iavf_client_task(struct work_struct *work)
2382 {
2383         struct iavf_adapter *adapter =
2384                 container_of(work, struct iavf_adapter, client_task.work);
2385
2386         /* If we can't get the client bit, just give up. We'll be rescheduled
2387          * later.
2388          */
2389
2390         if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section))
2391                 return;
2392
2393         if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2394                 iavf_client_subtask(adapter);
2395                 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
2396                 goto out;
2397         }
2398         if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2399                 iavf_notify_client_l2_params(&adapter->vsi);
2400                 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2401                 goto out;
2402         }
2403         if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
2404                 iavf_notify_client_close(&adapter->vsi, false);
2405                 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
2406                 goto out;
2407         }
2408         if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
2409                 iavf_notify_client_open(&adapter->vsi);
2410                 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
2411         }
2412 out:
2413         clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
2414 }
2415
2416 /**
2417  * iavf_free_all_tx_resources - Free Tx Resources for All Queues
2418  * @adapter: board private structure
2419  *
2420  * Free all transmit software resources
2421  **/
2422 void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
2423 {
2424         int i;
2425
2426         if (!adapter->tx_rings)
2427                 return;
2428
2429         for (i = 0; i < adapter->num_active_queues; i++)
2430                 if (adapter->tx_rings[i].desc)
2431                         iavf_free_tx_resources(&adapter->tx_rings[i]);
2432 }
2433
2434 /**
2435  * iavf_setup_all_tx_resources - allocate all queues Tx resources
2436  * @adapter: board private structure
2437  *
2438  * If this function returns with an error, then it's possible one or
2439  * more of the rings is populated (while the rest are not).  It is the
2440  * caller's duty to clean those orphaned rings.
2441  *
2442  * Return 0 on success, negative on failure
2443  **/
2444 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
2445 {
2446         int i, err = 0;
2447
2448         for (i = 0; i < adapter->num_active_queues; i++) {
2449                 adapter->tx_rings[i].count = adapter->tx_desc_count;
2450                 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
2451                 if (!err)
2452                         continue;
2453                 dev_err(&adapter->pdev->dev,
2454                         "Allocation for Tx Queue %u failed\n", i);
2455                 break;
2456         }
2457
2458         return err;
2459 }
2460
2461 /**
2462  * iavf_setup_all_rx_resources - allocate all queues Rx resources
2463  * @adapter: board private structure
2464  *
2465  * If this function returns with an error, then it's possible one or
2466  * more of the rings is populated (while the rest are not).  It is the
2467  * caller's duty to clean those orphaned rings.
2468  *
2469  * Return 0 on success, negative on failure
2470  **/
2471 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
2472 {
2473         int i, err = 0;
2474
2475         for (i = 0; i < adapter->num_active_queues; i++) {
2476                 adapter->rx_rings[i].count = adapter->rx_desc_count;
2477                 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
2478                 if (!err)
2479                         continue;
2480                 dev_err(&adapter->pdev->dev,
2481                         "Allocation for Rx Queue %u failed\n", i);
2482                 break;
2483         }
2484         return err;
2485 }
2486
2487 /**
2488  * iavf_free_all_rx_resources - Free Rx Resources for All Queues
2489  * @adapter: board private structure
2490  *
2491  * Free all receive software resources
2492  **/
2493 void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
2494 {
2495         int i;
2496
2497         if (!adapter->rx_rings)
2498                 return;
2499
2500         for (i = 0; i < adapter->num_active_queues; i++)
2501                 if (adapter->rx_rings[i].desc)
2502                         iavf_free_rx_resources(&adapter->rx_rings[i]);
2503 }
2504
2505 /**
2506  * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
2507  * @adapter: board private structure
2508  * @max_tx_rate: max Tx bw for a tc
2509  **/
2510 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
2511                                       u64 max_tx_rate)
2512 {
2513         int speed = 0, ret = 0;
2514
2515         if (ADV_LINK_SUPPORT(adapter)) {
2516                 if (adapter->link_speed_mbps < U32_MAX) {
2517                         speed = adapter->link_speed_mbps;
2518                         goto validate_bw;
2519                 } else {
2520                         dev_err(&adapter->pdev->dev, "Unknown link speed\n");
2521                         return -EINVAL;
2522                 }
2523         }
2524
2525         switch (adapter->link_speed) {
2526         case VIRTCHNL_LINK_SPEED_40GB:
2527                 speed = SPEED_40000;
2528                 break;
2529         case VIRTCHNL_LINK_SPEED_25GB:
2530                 speed = SPEED_25000;
2531                 break;
2532         case VIRTCHNL_LINK_SPEED_20GB:
2533                 speed = SPEED_20000;
2534                 break;
2535         case VIRTCHNL_LINK_SPEED_10GB:
2536                 speed = SPEED_10000;
2537                 break;
2538         case VIRTCHNL_LINK_SPEED_5GB:
2539                 speed = SPEED_5000;
2540                 break;
2541         case VIRTCHNL_LINK_SPEED_2_5GB:
2542                 speed = SPEED_2500;
2543                 break;
2544         case VIRTCHNL_LINK_SPEED_1GB:
2545                 speed = SPEED_1000;
2546                 break;
2547         case VIRTCHNL_LINK_SPEED_100MB:
2548                 speed = SPEED_100;
2549                 break;
2550         default:
2551                 break;
2552         }
2553
2554 validate_bw:
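             /* If the link speed is unknown, 'speed' is still 0 here, so
              * any nonzero requested rate is rejected.
              */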
2555         if (max_tx_rate > speed) {
2556                 dev_err(&adapter->pdev->dev,
2557                         "Invalid tx rate specified\n");
2558                 ret = -EINVAL;
2559         }
2560
2561         return ret;
2562 }
2563
2564 /**
2565  * iavf_validate_ch_config - validate queue mapping info
2566  * @adapter: board private structure
2567  * @mqprio_qopt: queue parameters
2568  *
2569  * This function validates if the config provided by the user to
2570  * configure queue channels is valid or not. Returns 0 on a valid
2571  * config.
2572  **/
2573 static int iavf_validate_ch_config(struct iavf_adapter *adapter,
2574                                    struct tc_mqprio_qopt_offload *mqprio_qopt)
2575 {
2576         u64 total_max_rate = 0;
2577         int i, num_qps = 0;
2578         u64 tx_rate = 0;
2579         int ret = 0;
2580
2581         if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
2582             mqprio_qopt->qopt.num_tc < 1)
2583                 return -EINVAL;
2584
2585         for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
2586                 if (!mqprio_qopt->qopt.count[i] ||
2587                     mqprio_qopt->qopt.offset[i] != num_qps)
2588                         return -EINVAL;
2589                 if (mqprio_qopt->min_rate[i]) {
2590                         dev_err(&adapter->pdev->dev,
2591                                 "Invalid min tx rate (greater than 0) specified\n");
2592                         return -EINVAL;
2593                 }
2594                 /* convert to Mbps */
2595                 tx_rate = div_u64(mqprio_qopt->max_rate[i],
2596                                   IAVF_MBPS_DIVISOR);
2597                 total_max_rate += tx_rate;
2598                 num_qps += mqprio_qopt->qopt.count[i];
2599         }
2600         if (num_qps > IAVF_MAX_REQ_QUEUES)
2601                 return -EINVAL;
2602
2603         ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
2604         return ret;
2605 }
2606
2607 /**
2608  * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
2609  * @adapter: board private structure
2610  **/
2611 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
2612 {
2613         struct iavf_cloud_filter *cf, *cftmp;
2614
2615         spin_lock_bh(&adapter->cloud_filter_list_lock);
2616         list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
2617                                  list) {
2618                 list_del(&cf->list);
2619                 kfree(cf);
2620                 adapter->num_cloud_filters--;
2621         }
2622         spin_unlock_bh(&adapter->cloud_filter_list_lock);
2623 }
2624
2625 /**
2626  * __iavf_setup_tc - configure multiple traffic classes
2627  * @netdev: network interface device structure
2628  * @type_data: tc offload data
2629  *
2630  * This function processes the config information provided by the
2631  * user to configure traffic classes/queue channels and packages the
2632  * information to request the PF to setup traffic classes.
2633  *
2634  * Returns 0 on success.
2635  **/
2636 static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
2637 {
2638         struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
2639         struct iavf_adapter *adapter = netdev_priv(netdev);
2640         struct virtchnl_vf_resource *vfres = adapter->vf_res;
2641         u8 num_tc = 0, total_qps = 0;
2642         int ret = 0, netdev_tc = 0;
2643         u64 max_tx_rate;
2644         u16 mode;
2645         int i;
2646
2647         num_tc = mqprio_qopt->qopt.num_tc;
2648         mode = mqprio_qopt->mode;
2649
2650         /* delete queue_channel */
2651         if (!mqprio_qopt->qopt.hw) {
2652                 if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
2653                         /* reset the tc configuration */
2654                         netdev_reset_tc(netdev);
2655                         adapter->num_tc = 0;
2656                         netif_tx_stop_all_queues(netdev);
2657                         netif_tx_disable(netdev);
2658                         iavf_del_all_cloud_filters(adapter);
2659                         adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
2660                         goto exit;
2661                 } else {
2662                         return -EINVAL;
2663                 }
2664         }
2665
2666         /* add queue channel */
2667         if (mode == TC_MQPRIO_MODE_CHANNEL) {
2668                 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
2669                         dev_err(&adapter->pdev->dev, "ADq not supported\n");
2670                         return -EOPNOTSUPP;
2671                 }
2672                 if (adapter->ch_config.state != __IAVF_TC_INVALID) {
2673                         dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
2674                         return -EINVAL;
2675                 }
2676
2677                 ret = iavf_validate_ch_config(adapter, mqprio_qopt);
2678                 if (ret)
2679                         return ret;
2680                 /* Return if same TC config is requested */
2681                 if (adapter->num_tc == num_tc)
2682                         return 0;
2683                 adapter->num_tc = num_tc;
2684
2685                 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
2686                         if (i < num_tc) {
2687                                 adapter->ch_config.ch_info[i].count =
2688                                         mqprio_qopt->qopt.count[i];
2689                                 adapter->ch_config.ch_info[i].offset =
2690                                         mqprio_qopt->qopt.offset[i];
2691                                 total_qps += mqprio_qopt->qopt.count[i];
2692                                 max_tx_rate = mqprio_qopt->max_rate[i];
2693                                 /* convert to Mbps */
2694                                 max_tx_rate = div_u64(max_tx_rate,
2695                                                       IAVF_MBPS_DIVISOR);
2696                                 adapter->ch_config.ch_info[i].max_tx_rate =
2697                                         max_tx_rate;
2698                         } else {
2699                                 adapter->ch_config.ch_info[i].count = 1;
2700                                 adapter->ch_config.ch_info[i].offset = 0;
2701                         }
2702                 }
2703                 adapter->ch_config.total_qps = total_qps;
2704                 netif_tx_stop_all_queues(netdev);
2705                 netif_tx_disable(netdev);
2706                 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
2707                 netdev_reset_tc(netdev);
2708                 /* Report the tc mapping up the stack */
2709                 netdev_set_num_tc(adapter->netdev, num_tc);
2710                 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
2711                         u16 qcount = mqprio_qopt->qopt.count[i];
2712                         u16 qoffset = mqprio_qopt->qopt.offset[i];
2713
2714                         if (i < num_tc)
2715                                 netdev_set_tc_queue(netdev, netdev_tc++, qcount,
2716                                                     qoffset);
2717                 }
2718         }
2719 exit:
2720         return ret;
2721 }
2722
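     /* For illustration (a hypothetical setup, not taken from this file):
      * channels of the kind configured above are typically created from
      * user space with the mqprio qdisc in channel mode, e.g.:
      *
      *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
      *      queues 4@0 4@4 hw 1 mode channel
      *
      * which reaches __iavf_setup_tc() with mode == TC_MQPRIO_MODE_CHANNEL.
      */
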
2723 /**
2724  * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
2725  * @adapter: board private structure
2726  * @f: pointer to struct flow_cls_offload
2727  * @filter: pointer to cloud filter structure
2728  */
2729 static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
2730                                  struct flow_cls_offload *f,
2731                                  struct iavf_cloud_filter *filter)
2732 {
2733         struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2734         struct flow_dissector *dissector = rule->match.dissector;
2735         u16 n_proto_mask = 0;
2736         u16 n_proto_key = 0;
2737         u8 field_flags = 0;
2738         u16 addr_type = 0;
2739         u16 n_proto = 0;
2740         int i = 0;
2741         struct virtchnl_filter *vf = &filter->f;
2742
2743         if (dissector->used_keys &
2744             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2745               BIT(FLOW_DISSECTOR_KEY_BASIC) |
2746               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2747               BIT(FLOW_DISSECTOR_KEY_VLAN) |
2748               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2749               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2750               BIT(FLOW_DISSECTOR_KEY_PORTS) |
2751               BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
2752                 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
2753                         dissector->used_keys);
2754                 return -EOPNOTSUPP;
2755         }
2756
2757         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
2758                 struct flow_match_enc_keyid match;
2759
2760                 flow_rule_match_enc_keyid(rule, &match);
2761                 if (match.mask->keyid != 0)
2762                         field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
2763         }
2764
2765         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2766                 struct flow_match_basic match;
2767
2768                 flow_rule_match_basic(rule, &match);
2769                 n_proto_key = ntohs(match.key->n_proto);
2770                 n_proto_mask = ntohs(match.mask->n_proto);
2771
2772                 if (n_proto_key == ETH_P_ALL) {
2773                         n_proto_key = 0;
2774                         n_proto_mask = 0;
2775                 }
2776                 n_proto = n_proto_key & n_proto_mask;
2777                 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
2778                         return -EINVAL;
2779                 if (n_proto == ETH_P_IPV6) {
2780                         /* specify flow type as TCP IPv6 */
2781                         vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
2782                 }
2783
2784                 if (match.key->ip_proto != IPPROTO_TCP) {
2785                         dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
2786                         return -EINVAL;
2787                 }
2788         }
2789
2790         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2791                 struct flow_match_eth_addrs match;
2792
2793                 flow_rule_match_eth_addrs(rule, &match);
2794
2795                 /* use is_broadcast and is_zero to check for all 0xf or 0 */
2796                 if (!is_zero_ether_addr(match.mask->dst)) {
2797                         if (is_broadcast_ether_addr(match.mask->dst)) {
2798                                 field_flags |= IAVF_CLOUD_FIELD_OMAC;
2799                         } else {
2800                                 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
2801                                         match.mask->dst);
2802                                 return IAVF_ERR_CONFIG;
2803                         }
2804                 }
2805
2806                 if (!is_zero_ether_addr(match.mask->src)) {
2807                         if (is_broadcast_ether_addr(match.mask->src)) {
2808                                 field_flags |= IAVF_CLOUD_FIELD_IMAC;
2809                         } else {
2810                                 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
2811                                         match.mask->src);
2812                                 return IAVF_ERR_CONFIG;
2813                         }
2814                 }
2815
2816                 if (!is_zero_ether_addr(match.key->dst))
2817                         if (is_valid_ether_addr(match.key->dst) ||
2818                             is_multicast_ether_addr(match.key->dst)) {
2819                                 /* set the mask if a valid dst_mac address */
2820                                 for (i = 0; i < ETH_ALEN; i++)
2821                                         vf->mask.tcp_spec.dst_mac[i] |= 0xff;
2822                                 ether_addr_copy(vf->data.tcp_spec.dst_mac,
2823                                                 match.key->dst);
2824                         }
2825
2826                 if (!is_zero_ether_addr(match.key->src))
2827                         if (is_valid_ether_addr(match.key->src) ||
2828                             is_multicast_ether_addr(match.key->src)) {
2829                                 /* set the mask if a valid src_mac address */
2830                                 for (i = 0; i < ETH_ALEN; i++)
2831                                         vf->mask.tcp_spec.src_mac[i] |= 0xff;
2832                                 ether_addr_copy(vf->data.tcp_spec.src_mac,
2833                                                 match.key->src);
2834                         }
2835         }
2836
2837         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2838                 struct flow_match_vlan match;
2839
2840                 flow_rule_match_vlan(rule, &match);
2841                 if (match.mask->vlan_id) {
2842                         if (match.mask->vlan_id == VLAN_VID_MASK) {
2843                                 field_flags |= IAVF_CLOUD_FIELD_IVLAN;
2844                         } else {
2845                                 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
2846                                         match.mask->vlan_id);
2847                                 return IAVF_ERR_CONFIG;
2848                         }
2849                 }
2850                 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
2851                 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
2852         }
2853
2854         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2855                 struct flow_match_control match;
2856
2857                 flow_rule_match_control(rule, &match);
2858                 addr_type = match.key->addr_type;
2859         }
2860
2861         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2862                 struct flow_match_ipv4_addrs match;
2863
2864                 flow_rule_match_ipv4_addrs(rule, &match);
2865                 if (match.mask->dst) {
2866                         if (match.mask->dst == cpu_to_be32(0xffffffff)) {
2867                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
2868                         } else {
2869                                 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
2870                                         be32_to_cpu(match.mask->dst));
2871                                 return IAVF_ERR_CONFIG;
2872                         }
2873                 }
2874
2875                 if (match.mask->src) {
2876                         if (match.mask->src == cpu_to_be32(0xffffffff)) {
2877                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
2878                         } else {
2879                                 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
2880                                         be32_to_cpu(match.mask->src));
2881                                 return IAVF_ERR_CONFIG;
2882                         }
2883                 }
2884
2885                 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
2886                         dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
2887                         return IAVF_ERR_CONFIG;
2888                 }
2889                 if (match.key->dst) {
2890                         vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
2891                         vf->data.tcp_spec.dst_ip[0] = match.key->dst;
2892                 }
2893                 if (match.key->src) {
2894                         vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
2895                         vf->data.tcp_spec.src_ip[0] = match.key->src;
2896                 }
2897         }
2898
2899         if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2900                 struct flow_match_ipv6_addrs match;
2901
2902                 flow_rule_match_ipv6_addrs(rule, &match);
2903
2904                 /* validate mask, make sure it is not IPV6_ADDR_ANY */
2905                 if (ipv6_addr_any(&match.mask->dst)) {
2906                         dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
2907                                 IPV6_ADDR_ANY);
2908                         return IAVF_ERR_CONFIG;
2909                 }
2910
2911                 /* src and dest IPv6 addresses should not be LOOPBACK
2912                  * (0:0:0:0:0:0:0:1), which can be represented as ::1
2913                  */
2914                 if (ipv6_addr_loopback(&match.key->dst) ||
2915                     ipv6_addr_loopback(&match.key->src)) {
2916                         dev_err(&adapter->pdev->dev,
2917                                 "ipv6 addr should not be loopback\n");
2918                         return IAVF_ERR_CONFIG;
2919                 }
2920                 if (!ipv6_addr_any(&match.mask->dst) ||
2921                     !ipv6_addr_any(&match.mask->src))
2922                         field_flags |= IAVF_CLOUD_FIELD_IIP;
2923
2924                 for (i = 0; i < 4; i++)
2925                         vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
2926                 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
2927                        sizeof(vf->data.tcp_spec.dst_ip));
2928                 for (i = 0; i < 4; i++)
2929                         vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
2930                 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
2931                        sizeof(vf->data.tcp_spec.src_ip));
2932         }
2933         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2934                 struct flow_match_ports match;
2935
2936                 flow_rule_match_ports(rule, &match);
2937                 if (match.mask->src) {
2938                         if (match.mask->src == cpu_to_be16(0xffff)) {
2939                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
2940                         } else {
2941                                 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
2942                                         be16_to_cpu(match.mask->src));
2943                                 return IAVF_ERR_CONFIG;
2944                         }
2945                 }
2946
2947                 if (match.mask->dst) {
2948                         if (match.mask->dst == cpu_to_be16(0xffff)) {
2949                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
2950                         } else {
2951                                 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
2952                                         be16_to_cpu(match.mask->dst));
2953                                 return IAVF_ERR_CONFIG;
2954                         }
2955                 }
2956                 if (match.key->dst) {
2957                         vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
2958                         vf->data.tcp_spec.dst_port = match.key->dst;
2959                 }
2960
2961                 if (match.key->src) {
2962                         vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
2963                         vf->data.tcp_spec.src_port = match.key->src;
2964                 }
2965         }
2966         vf->field_flags = field_flags;
2967
2968         return 0;
2969 }
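
/* Illustration, not part of the driver: a flower rule that this parser
 * accepts could be installed from user space roughly as follows (the
 * interface name ens1f0v0 is a hypothetical example):
 *
 *   tc qdisc add dev ens1f0v0 clsact
 *   tc filter add dev ens1f0v0 ingress protocol ip flower skip_sw \
 *           ip_proto tcp dst_ip 192.168.1.10 dst_port 80 hw_tc 1
 *
 * As the checks above enforce, only exact (all-ones) masks are accepted;
 * a partial mask such as dst_ip 192.168.1.0/24 fails with IAVF_ERR_CONFIG.
 */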
2970
2971 /**
2972  * iavf_handle_tclass - Forward to a traffic class on the device
2973  * @adapter: board private structure
2974  * @tc: traffic class index on the device
2975  * @filter: pointer to cloud filter structure
2976  */
2977 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
2978                               struct iavf_cloud_filter *filter)
2979 {
2980         if (tc == 0)
2981                 return 0;
2982         if (tc < adapter->num_tc) {
2983                 if (!filter->f.data.tcp_spec.dst_port) {
2984                         dev_err(&adapter->pdev->dev,
2985                                 "Specify destination port to redirect to traffic class other than TC0\n");
2986                         return -EINVAL;
2987                 }
2988         }
2989         /* redirect to a traffic class on the same device */
2990         filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
2991         filter->f.action_meta = tc;
2992         return 0;
2993 }
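
/* Illustration: with the example rule shown after iavf_parse_cls_flower()
 * above, "hw_tc 1" arrives here as tc == 1; because that rule supplies a
 * destination port match, the filter is accepted and encoded as a
 * VIRTCHNL_ACTION_TC_REDIRECT action with action_meta == 1.
 */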
2994
2995 /**
2996  * iavf_configure_clsflower - Add tc flower filters
2997  * @adapter: board private structure
2998  * @cls_flower: Pointer to struct flow_cls_offload
2999  */
3000 static int iavf_configure_clsflower(struct iavf_adapter *adapter,
3001                                     struct flow_cls_offload *cls_flower)
3002 {
3003         int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
3004         struct iavf_cloud_filter *filter = NULL;
3005         int err = -EINVAL, count = 50;
3006
3007         if (tc < 0) {
3008                 dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
3009                 return -EINVAL;
3010         }
3011
3012         filter = kzalloc(sizeof(*filter), GFP_KERNEL);
3013         if (!filter)
3014                 return -ENOMEM;
3015
3016         while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
3017                                 &adapter->crit_section)) {
3018                 if (--count == 0)
3019                         goto err;
3020                 udelay(1);
3021         }
3022
3023         filter->cookie = cls_flower->cookie;
3024
3025         /* set the mask to all zeroes to begin with */
3026         memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
3027         /* start out with flow type and eth type IPv4 */
3028         filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
3029         err = iavf_parse_cls_flower(adapter, cls_flower, filter);
3030         if (err < 0)
3031                 goto err;
3032
3033         err = iavf_handle_tclass(adapter, tc, filter);
3034         if (err < 0)
3035                 goto err;
3036
3037         /* add filter to the list */
3038         spin_lock_bh(&adapter->cloud_filter_list_lock);
3039         list_add_tail(&filter->list, &adapter->cloud_filter_list);
3040         adapter->num_cloud_filters++;
3041         filter->add = true;
3042         adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3043         spin_unlock_bh(&adapter->cloud_filter_list_lock);
3044 err:
3045         if (err)
3046                 kfree(filter);
3047
3048         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3049         return err;
3050 }
3051
3052 /**
 * iavf_find_cf - Find the cloud filter in the list
3053  * @adapter: Board private structure
3054  * @cookie: filter specific cookie
3055  *
3056  * Returns ptr to the filter object or NULL. Must be called while holding the
3057  * cloud_filter_list_lock.
3058  */
3059 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
3060                                               unsigned long *cookie)
3061 {
3062         struct iavf_cloud_filter *filter = NULL;
3063
3064         if (!cookie)
3065                 return NULL;
3066
3067         list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
3068                 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
3069                         return filter;
3070         }
3071         return NULL;
3072 }
3073
3074 /**
3075  * iavf_delete_clsflower - Remove tc flower filters
3076  * @adapter: board private structure
3077  * @cls_flower: Pointer to struct flow_cls_offload
3078  */
3079 static int iavf_delete_clsflower(struct iavf_adapter *adapter,
3080                                  struct flow_cls_offload *cls_flower)
3081 {
3082         struct iavf_cloud_filter *filter = NULL;
3083         int err = 0;
3084
3085         spin_lock_bh(&adapter->cloud_filter_list_lock);
3086         filter = iavf_find_cf(adapter, &cls_flower->cookie);
3087         if (filter) {
3088                 filter->del = true;
3089                 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
3090         } else {
3091                 err = -EINVAL;
3092         }
3093         spin_unlock_bh(&adapter->cloud_filter_list_lock);
3094
3095         return err;
3096 }
3097
3098 /**
3099  * iavf_setup_tc_cls_flower - flower classifier offloads
3100  * @adapter: board private structure
3101  * @cls_flower: pointer to flow_cls_offload struct with flow info
3102  */
3103 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
3104                                     struct flow_cls_offload *cls_flower)
3105 {
3106         switch (cls_flower->command) {
3107         case FLOW_CLS_REPLACE:
3108                 return iavf_configure_clsflower(adapter, cls_flower);
3109         case FLOW_CLS_DESTROY:
3110                 return iavf_delete_clsflower(adapter, cls_flower);
3111         case FLOW_CLS_STATS:
3112                 return -EOPNOTSUPP;
3113         default:
3114                 return -EOPNOTSUPP;
3115         }
3116 }
3117
3118 /**
3119  * iavf_setup_tc_block_cb - block callback for tc
3120  * @type: type of offload
3121  * @type_data: offload data
3122  * @cb_priv: board private structure
3123  *
3124  * This function is the block callback for traffic classes
3125  **/
3126 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3127                                   void *cb_priv)
3128 {
3129         struct iavf_adapter *adapter = cb_priv;
3130
3131         if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
3132                 return -EOPNOTSUPP;
3133
3134         switch (type) {
3135         case TC_SETUP_CLSFLOWER:
3136                 return iavf_setup_tc_cls_flower(cb_priv, type_data);
3137         default:
3138                 return -EOPNOTSUPP;
3139         }
3140 }
3141
3142 static LIST_HEAD(iavf_block_cb_list);
3143
3144 /**
3145  * iavf_setup_tc - configure multiple traffic classes
3146  * @netdev: network interface device structure
3147  * @type: type of offload
3148  * @type_data: tc offload data
3149  *
3150  * This function is the callback to ndo_setup_tc in the
3151  * netdev_ops.
3152  *
3153  * Returns 0 on success
3154  **/
3155 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
3156                          void *type_data)
3157 {
3158         struct iavf_adapter *adapter = netdev_priv(netdev);
3159
3160         switch (type) {
3161         case TC_SETUP_QDISC_MQPRIO:
3162                 return __iavf_setup_tc(netdev, type_data);
3163         case TC_SETUP_BLOCK:
3164                 return flow_block_cb_setup_simple(type_data,
3165                                                   &iavf_block_cb_list,
3166                                                   iavf_setup_tc_block_cb,
3167                                                   adapter, adapter, true);
3168         default:
3169                 return -EOPNOTSUPP;
3170         }
3171 }
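
/* Illustration, not part of the driver: the TC_SETUP_QDISC_MQPRIO case is
 * reached through an ADQ-style channel configuration, for example (with a
 * hypothetical interface name):
 *
 *   tc qdisc add dev ens1f0v0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *           queues 4@0 4@4 hw 1 mode channel
 *
 * whereas TC_SETUP_BLOCK is taken when flower filters are bound to the
 * device and dispatches to iavf_setup_tc_block_cb() above.
 */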
3172
3173 /**
3174  * iavf_open - Called when a network interface is made active
3175  * @netdev: network interface device structure
3176  *
3177  * Returns 0 on success, negative value on failure
3178  *
3179  * The open entry point is called when a network interface is made
3180  * active by the system (IFF_UP).  At this point all resources needed
3181  * for transmit and receive operations are allocated, the interrupt
3182  * handler is registered with the OS, the watchdog is started,
3183  * and the stack is notified that the interface is ready.
3184  **/
3185 static int iavf_open(struct net_device *netdev)
3186 {
3187         struct iavf_adapter *adapter = netdev_priv(netdev);
3188         int err;
3189
3190         if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
3191                 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
3192                 return -EIO;
3193         }
3194
3195         while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
3196                                 &adapter->crit_section))
3197                 usleep_range(500, 1000);
3198
3199         if (adapter->state != __IAVF_DOWN) {
3200                 err = -EBUSY;
3201                 goto err_unlock;
3202         }
3203
3204         /* allocate transmit descriptors */
3205         err = iavf_setup_all_tx_resources(adapter);
3206         if (err)
3207                 goto err_setup_tx;
3208
3209         /* allocate receive descriptors */
3210         err = iavf_setup_all_rx_resources(adapter);
3211         if (err)
3212                 goto err_setup_rx;
3213
3214         /* clear any pending interrupts, may auto mask */
3215         err = iavf_request_traffic_irqs(adapter, netdev->name);
3216         if (err)
3217                 goto err_req_irq;
3218
3219         spin_lock_bh(&adapter->mac_vlan_list_lock);
3220
3221         iavf_add_filter(adapter, adapter->hw.mac.addr);
3222
3223         spin_unlock_bh(&adapter->mac_vlan_list_lock);
3224
3225         iavf_configure(adapter);
3226
3227         iavf_up_complete(adapter);
3228
3229         iavf_irq_enable(adapter, true);
3230
3231         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3232
3233         return 0;
3234
3235 err_req_irq:
3236         iavf_down(adapter);
3237         iavf_free_traffic_irqs(adapter);
3238 err_setup_rx:
3239         iavf_free_all_rx_resources(adapter);
3240 err_setup_tx:
3241         iavf_free_all_tx_resources(adapter);
3242 err_unlock:
3243         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3244
3245         return err;
3246 }
3247
3248 /**
3249  * iavf_close - Disables a network interface
3250  * @netdev: network interface device structure
3251  *
3252  * Returns 0, this is not allowed to fail
3253  *
3254  * The close entry point is called when an interface is de-activated
3255  * by the OS.  The hardware is still under the drivers control, but
3256  * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
3257  * are freed, along with all transmit and receive resources.
3258  **/
3259 static int iavf_close(struct net_device *netdev)
3260 {
3261         struct iavf_adapter *adapter = netdev_priv(netdev);
3262         int status;
3263
3264         if (adapter->state <= __IAVF_DOWN_PENDING)
3265                 return 0;
3266
3267         while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
3268                                 &adapter->crit_section))
3269                 usleep_range(500, 1000);
3270
3271         set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3272         if (CLIENT_ENABLED(adapter))
3273                 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
3274
3275         iavf_down(adapter);
3276         adapter->state = __IAVF_DOWN_PENDING;
3277         iavf_free_traffic_irqs(adapter);
3278
3279         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3280
3281         /* We explicitly don't free resources here because the hardware is
3282          * still active and can DMA into memory. Resources are cleared in
3283          * iavf_virtchnl_completion() after we get confirmation from the PF
3284          * driver that the rings have been stopped.
3285          *
3286          * Also, we wait for state to transition to __IAVF_DOWN before
3287          * returning. State change occurs in iavf_virtchnl_completion() after
3288          * VF resources are released (which occurs after PF driver processes and
3289          * responds to admin queue commands).
3290          */
3291
3292         status = wait_event_timeout(adapter->down_waitqueue,
3293                                     adapter->state == __IAVF_DOWN,
3294                                     msecs_to_jiffies(500));
3295         if (!status)
3296                 netdev_warn(netdev, "Device resources not yet released\n");
3297         return 0;
3298 }
3299
3300 /**
3301  * iavf_change_mtu - Change the Maximum Transfer Unit
3302  * @netdev: network interface device structure
3303  * @new_mtu: new value for maximum frame size
3304  *
3305  * Returns 0 on success, negative on failure
3306  **/
3307 static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
3308 {
3309         struct iavf_adapter *adapter = netdev_priv(netdev);
3310
3311         netdev->mtu = new_mtu;
3312         if (CLIENT_ENABLED(adapter)) {
3313                 iavf_notify_client_l2_params(&adapter->vsi);
3314                 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3315         }
3316         adapter->flags |= IAVF_FLAG_RESET_NEEDED;
3317         queue_work(iavf_wq, &adapter->reset_task);
3318
3319         return 0;
3320 }
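
/* Illustration: an MTU change from user space, e.g.
 *
 *   ip link set dev ens1f0v0 mtu 9000    (ens1f0v0 is a hypothetical name)
 *
 * lands here; the new value only takes full effect once the reset
 * scheduled above has completed.
 */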
3321
3322 /**
3323  * iavf_set_features - set the netdev feature flags
3324  * @netdev: ptr to the netdev being adjusted
3325  * @features: the feature set that the stack is suggesting
3326  * Note: expects to be called while under rtnl_lock()
3327  **/
3328 static int iavf_set_features(struct net_device *netdev,
3329                              netdev_features_t features)
3330 {
3331         struct iavf_adapter *adapter = netdev_priv(netdev);
3332
3333         /* Don't allow changing VLAN_RX flag when adapter is not capable
3334          * of VLAN offload
3335          */
3336         if (!VLAN_ALLOWED(adapter)) {
3337                 if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
3338                         return -EINVAL;
3339         } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
3340                 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3341                         adapter->aq_required |=
3342                                 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
3343                 else
3344                         adapter->aq_required |=
3345                                 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
3346         }
3347
3348         return 0;
3349 }
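
/* Illustration: toggling RX VLAN stripping from user space, e.g.
 *
 *   ethtool -K ens1f0v0 rxvlan off       (ens1f0v0 is a hypothetical name)
 *
 * reaches this handler; the aq_required flag set above only requests the
 * change, and the corresponding virtchnl message is sent later from the
 * admin-queue processing path.
 */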
3350
3351 /**
3352  * iavf_features_check - Validate encapsulated packet conforms to limits
3353  * @skb: skb buff
3354  * @dev: This physical port's netdev
3355  * @features: Offload features that the stack believes apply
3356  **/
3357 static netdev_features_t iavf_features_check(struct sk_buff *skb,
3358                                              struct net_device *dev,
3359                                              netdev_features_t features)
3360 {
3361         size_t len;
3362
3363         /* No point in doing any of this if neither checksum nor GSO are
3364          * being requested for this frame.  We can rule out both by just
3365          * checking for CHECKSUM_PARTIAL
3366          */
3367         if (skb->ip_summed != CHECKSUM_PARTIAL)
3368                 return features;
3369
3370         /* We cannot support GSO if the MSS is going to be less than
3371          * 64 bytes.  If it is then we need to drop support for GSO.
3372          */
3373         if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
3374                 features &= ~NETIF_F_GSO_MASK;
3375
3376         /* MACLEN can support at most 63 words */
3377         len = skb_network_header(skb) - skb->data;
3378         if (len & ~(63 * 2))
3379                 goto out_err;
3380
3381         /* IPLEN and EIPLEN can support at most 127 dwords */
3382         len = skb_transport_header(skb) - skb_network_header(skb);
3383         if (len & ~(127 * 4))
3384                 goto out_err;
3385
3386         if (skb->encapsulation) {
3387                 /* L4TUNLEN can support 127 words */
3388                 len = skb_inner_network_header(skb) - skb_transport_header(skb);
3389                 if (len & ~(127 * 2))
3390                         goto out_err;
3391
3392                 /* IPLEN can support at most 127 dwords */
3393                 len = skb_inner_transport_header(skb) -
3394                       skb_inner_network_header(skb);
3395                 if (len & ~(127 * 4))
3396                         goto out_err;
3397         }
3398
3399         /* No need to validate L4LEN as TCP is the only protocol with a
3400          * flexible value, and we support all possible values supported
3401          * by TCP, which is at most 15 dwords
3402          */
3403
3404         return features;
3405 out_err:
3406         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3407 }
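
/* Worked example for the MACLEN check above: a VLAN-tagged frame has an
 * 18-byte L2 header, so len = 18 and 18 & ~(63 * 2) == 18 & ~126 == 0,
 * i.e. the frame passes. The mask rejects any length that is odd or
 * greater than 126 bytes, since the hardware field counts 2-byte words
 * with a maximum of 63.
 */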
3408
3409 /**
3410  * iavf_fix_features - fix up the netdev feature bits
3411  * @netdev: our net device
3412  * @features: desired feature bits
3413  *
3414  * Returns fixed-up features bits
3415  **/
3416 static netdev_features_t iavf_fix_features(struct net_device *netdev,
3417                                            netdev_features_t features)
3418 {
3419         struct iavf_adapter *adapter = netdev_priv(netdev);
3420
3421         if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
3422                 features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3423                               NETIF_F_HW_VLAN_CTAG_RX |
3424                               NETIF_F_HW_VLAN_CTAG_FILTER);
3425
3426         return features;
3427 }
3428
3429 static const struct net_device_ops iavf_netdev_ops = {
3430         .ndo_open               = iavf_open,
3431         .ndo_stop               = iavf_close,
3432         .ndo_start_xmit         = iavf_xmit_frame,
3433         .ndo_set_rx_mode        = iavf_set_rx_mode,
3434         .ndo_validate_addr      = eth_validate_addr,
3435         .ndo_set_mac_address    = iavf_set_mac,
3436         .ndo_change_mtu         = iavf_change_mtu,
3437         .ndo_tx_timeout         = iavf_tx_timeout,
3438         .ndo_vlan_rx_add_vid    = iavf_vlan_rx_add_vid,
3439         .ndo_vlan_rx_kill_vid   = iavf_vlan_rx_kill_vid,
3440         .ndo_features_check     = iavf_features_check,
3441         .ndo_fix_features       = iavf_fix_features,
3442         .ndo_set_features       = iavf_set_features,
3443         .ndo_setup_tc           = iavf_setup_tc,
3444 };
3445
3446 /**
3447  * iavf_check_reset_complete - check that VF reset is complete
3448  * @hw: pointer to hw struct
3449  *
3450  * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
3451  **/
3452 static int iavf_check_reset_complete(struct iavf_hw *hw)
3453 {
3454         u32 rstat;
3455         int i;
3456
3457         for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
3458                 rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
3459                              IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3460                 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
3461                     (rstat == VIRTCHNL_VFR_COMPLETED))
3462                         return 0;
3463                 usleep_range(10, 20);
3464         }
3465         return -EBUSY;
3466 }
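
/* Note (illustrative): with the usleep_range(10, 20) above, the loop
 * bounds the total wait to roughly IAVF_RESET_WAIT_COMPLETE_COUNT times
 * 10-20 microseconds before giving up with -EBUSY, which callers treat
 * as "VF reset still in progress".
 */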
3467
3468 /**
3469  * iavf_process_config - Process the config information we got from the PF
3470  * @adapter: board private structure
3471  *
3472  * Verify that we have a valid config struct, and set up our netdev features
3473  * and our VSI struct.
3474  **/
3475 int iavf_process_config(struct iavf_adapter *adapter)
3476 {
3477         struct virtchnl_vf_resource *vfres = adapter->vf_res;
3478         int i, num_req_queues = adapter->num_req_queues;
3479         struct net_device *netdev = adapter->netdev;
3480         struct iavf_vsi *vsi = &adapter->vsi;
3481         netdev_features_t hw_enc_features;
3482         netdev_features_t hw_features;
3483
3484         /* got VF config message back from PF, now we can parse it */
3485         for (i = 0; i < vfres->num_vsis; i++) {
3486                 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
3487                         adapter->vsi_res = &vfres->vsi_res[i];
3488         }
3489         if (!adapter->vsi_res) {
3490                 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
3491                 return -ENODEV;
3492         }
3493
3494         if (num_req_queues &&
3495             num_req_queues > adapter->vsi_res->num_queue_pairs) {
3496                 /* Problem.  The PF gave us fewer queues than we had
3497                  * negotiated in our request.  Need a reset to see if we can
3498                  * get back to a working state.
3499                  */
3500                 dev_err(&adapter->pdev->dev,
3501                         "Requested %d queues, but PF only gave us %d.\n",
3502                         num_req_queues,
3503                         adapter->vsi_res->num_queue_pairs);
3504                 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
3505                 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
3506                 iavf_schedule_reset(adapter);
3507                 return -ENODEV;
3508         }
3509         adapter->num_req_queues = 0;
3510
3511         hw_enc_features = NETIF_F_SG                    |
3512                           NETIF_F_IP_CSUM               |
3513                           NETIF_F_IPV6_CSUM             |
3514                           NETIF_F_HIGHDMA               |
3515                           NETIF_F_SOFT_FEATURES         |
3516                           NETIF_F_TSO                   |
3517                           NETIF_F_TSO_ECN               |
3518                           NETIF_F_TSO6                  |
3519                           NETIF_F_SCTP_CRC              |
3520                           NETIF_F_RXHASH                |
3521                           NETIF_F_RXCSUM                |
3522                           0;
3523
3524         /* advertise to stack only if offloads for encapsulated packets
3525          * are supported
3526          */
3527         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
3528                 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL       |
3529                                    NETIF_F_GSO_GRE              |
3530                                    NETIF_F_GSO_GRE_CSUM         |
3531                                    NETIF_F_GSO_IPXIP4           |
3532                                    NETIF_F_GSO_IPXIP6           |
3533                                    NETIF_F_GSO_UDP_TUNNEL_CSUM  |
3534                                    NETIF_F_GSO_PARTIAL          |
3535                                    0;
3536
3537                 if (!(vfres->vf_cap_flags &
3538                       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
3539                         netdev->gso_partial_features |=
3540                                 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3541
3542                 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
3543                 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
3544                 netdev->hw_enc_features |= hw_enc_features;
3545         }
3546         /* record features VLANs can make use of */
3547         netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
3548
3549         /* Write features and hw_features separately to avoid polluting
3550          * with, or dropping, features that are set when we registered.
3551          */
3552         hw_features = hw_enc_features;
3553
3554         /* Enable VLAN features if supported */
3555         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3556                 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
3557                                 NETIF_F_HW_VLAN_CTAG_RX);
3558         /* Enable cloud filter if ADQ is supported */
3559         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
3560                 hw_features |= NETIF_F_HW_TC;
3561         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
3562                 hw_features |= NETIF_F_GSO_UDP_L4;
3563
3564         netdev->hw_features |= hw_features;
3565
3566         netdev->features |= hw_features;
3567
3568         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3569                 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3570
3571         netdev->priv_flags |= IFF_UNICAST_FLT;
3572
3573         /* Do not turn on offloads when they are requested to be turned off.
3574          * TSO needs an MTU of at least 576 bytes to work correctly.
3575          */
3576         if (netdev->wanted_features) {
3577                 if (!(netdev->wanted_features & NETIF_F_TSO) ||
3578                     netdev->mtu < 576)
3579                         netdev->features &= ~NETIF_F_TSO;
3580                 if (!(netdev->wanted_features & NETIF_F_TSO6) ||
3581                     netdev->mtu < 576)
3582                         netdev->features &= ~NETIF_F_TSO6;
3583                 if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
3584                         netdev->features &= ~NETIF_F_TSO_ECN;
3585                 if (!(netdev->wanted_features & NETIF_F_GRO))
3586                         netdev->features &= ~NETIF_F_GRO;
3587                 if (!(netdev->wanted_features & NETIF_F_GSO))
3588                         netdev->features &= ~NETIF_F_GSO;
3589         }
3590
3591         adapter->vsi.id = adapter->vsi_res->vsi_id;
3592
3593         adapter->vsi.back = adapter;
3594         adapter->vsi.base_vector = 1;
3595         adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
3596         vsi->netdev = adapter->netdev;
3597         vsi->qs_handle = adapter->vsi_res->qset_handle;
3598         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
3599                 adapter->rss_key_size = vfres->rss_key_size;
3600                 adapter->rss_lut_size = vfres->rss_lut_size;
3601         } else {
3602                 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
3603                 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
3604         }
3605
3606         return 0;
3607 }
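
/* Illustration: adapter->num_req_queues is set from user space via
 * "ethtool -L ens1f0v0 combined 8" (a hypothetical example handled by the
 * driver's set_channels path); the check above catches the case where the
 * PF grants fewer queue pairs than requested and schedules a reset so the
 * driver can fall back to the granted count.
 */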
3608
3609 /**
3610  * iavf_init_task - worker thread to perform delayed initialization
3611  * @work: pointer to work_struct containing our data
3612  *
3613  * This task completes the work that was begun in probe. Due to the nature
3614  * of VF-PF communications, we may need to wait tens of milliseconds to get
3615  * responses back from the PF. Rather than busy-wait in probe and bog down the
3616  * whole system, we'll do it in a task so we can sleep.
3617  * This task only runs during driver init. Once we've established
3618  * communications with the PF driver and set up our netdev, the watchdog
3619  * takes over.
3620  **/
3621 static void iavf_init_task(struct work_struct *work)
3622 {
3623         struct iavf_adapter *adapter = container_of(work,
3624                                                     struct iavf_adapter,
3625                                                     init_task.work);
3626         struct iavf_hw *hw = &adapter->hw;
3627
3628         switch (adapter->state) {
3629         case __IAVF_STARTUP:
3630                 if (iavf_startup(adapter) < 0)
3631                         goto init_failed;
3632                 break;
3633         case __IAVF_INIT_VERSION_CHECK:
3634                 if (iavf_init_version_check(adapter) < 0)
3635                         goto init_failed;
3636                 break;
3637         case __IAVF_INIT_GET_RESOURCES:
3638                 if (iavf_init_get_resources(adapter) < 0)
3639                         goto init_failed;
3640                 return;
3641         default:
3642                 goto init_failed;
3643         }
3644
3645         queue_delayed_work(iavf_wq, &adapter->init_task,
3646                            msecs_to_jiffies(30));
3647         return;
3648 init_failed:
3649         if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
3650                 dev_err(&adapter->pdev->dev,
3651                         "Failed to communicate with PF; waiting before retry\n");
3652                 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
3653                 iavf_shutdown_adminq(hw);
3654                 adapter->state = __IAVF_STARTUP;
3655                 queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5);
3656                 return;
3657         }
3658         queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
3659 }
3660
3661 /**
3662  * iavf_shutdown - Shutdown the device in preparation for a reboot
3663  * @pdev: pci device structure
3664  **/
3665 static void iavf_shutdown(struct pci_dev *pdev)
3666 {
3667         struct net_device *netdev = pci_get_drvdata(pdev);
3668         struct iavf_adapter *adapter = netdev_priv(netdev);
3669
3670         netif_device_detach(netdev);
3671
3672         if (netif_running(netdev))
3673                 iavf_close(netdev);
3674
3675         /* Prevent the watchdog from running. */
3676         adapter->state = __IAVF_REMOVE;
3677         adapter->aq_required = 0;
3678
3679 #ifdef CONFIG_PM
3680         pci_save_state(pdev);
3681
3682 #endif
3683         pci_disable_device(pdev);
3684 }
3685
3686 /**
3687  * iavf_probe - Device Initialization Routine
3688  * @pdev: PCI device information struct
3689  * @ent: entry in iavf_pci_tbl
3690  *
3691  * Returns 0 on success, negative on failure
3692  *
3693  * iavf_probe initializes an adapter identified by a pci_dev structure.
3694  * The OS initialization, configuring of the adapter private structure,
3695  * and a hardware reset occur.
3696  **/
3697 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3698 {
3699         struct net_device *netdev;
3700         struct iavf_adapter *adapter = NULL;
3701         struct iavf_hw *hw = NULL;
3702         int err;
3703
3704         err = pci_enable_device(pdev);
3705         if (err)
3706                 return err;
3707
3708         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3709         if (err) {
3710                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3711                 if (err) {
3712                         dev_err(&pdev->dev,
3713                                 "DMA configuration failed: 0x%x\n", err);
3714                         goto err_dma;
3715                 }
3716         }
3717
3718         err = pci_request_regions(pdev, iavf_driver_name);
3719         if (err) {
3720                 dev_err(&pdev->dev,
3721                         "pci_request_regions failed 0x%x\n", err);
3722                 goto err_pci_reg;
3723         }
3724
3725         pci_enable_pcie_error_reporting(pdev);
3726
3727         pci_set_master(pdev);
3728
3729         netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
3730                                    IAVF_MAX_REQ_QUEUES);
3731         if (!netdev) {
3732                 err = -ENOMEM;
3733                 goto err_alloc_etherdev;
3734         }
3735
3736         SET_NETDEV_DEV(netdev, &pdev->dev);
3737
3738         pci_set_drvdata(pdev, netdev);
3739         adapter = netdev_priv(netdev);
3740
3741         adapter->netdev = netdev;
3742         adapter->pdev = pdev;
3743
3744         hw = &adapter->hw;
3745         hw->back = adapter;
3746
3747         adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3748         adapter->state = __IAVF_STARTUP;
3749
3750         /* Call save state here because it relies on the adapter struct. */
3751         pci_save_state(pdev);
3752
3753         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3754                               pci_resource_len(pdev, 0));
3755         if (!hw->hw_addr) {
3756                 err = -EIO;
3757                 goto err_ioremap;
3758         }
3759         hw->vendor_id = pdev->vendor;
3760         hw->device_id = pdev->device;
3761         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
3762         hw->subsystem_vendor_id = pdev->subsystem_vendor;
3763         hw->subsystem_device_id = pdev->subsystem_device;
3764         hw->bus.device = PCI_SLOT(pdev->devfn);
3765         hw->bus.func = PCI_FUNC(pdev->devfn);
3766         hw->bus.bus_id = pdev->bus->number;
3767
3768         /* set up the locks for the AQ, do this only once in probe
3769          * and destroy them only once in remove
3770          */
3771         mutex_init(&hw->aq.asq_mutex);
3772         mutex_init(&hw->aq.arq_mutex);
3773
3774         spin_lock_init(&adapter->mac_vlan_list_lock);
3775         spin_lock_init(&adapter->cloud_filter_list_lock);
3776         spin_lock_init(&adapter->fdir_fltr_lock);
3777         spin_lock_init(&adapter->adv_rss_lock);
3778
3779         INIT_LIST_HEAD(&adapter->mac_filter_list);
3780         INIT_LIST_HEAD(&adapter->vlan_filter_list);
3781         INIT_LIST_HEAD(&adapter->cloud_filter_list);
3782         INIT_LIST_HEAD(&adapter->fdir_list_head);
3783         INIT_LIST_HEAD(&adapter->adv_rss_list_head);
3784
3785         INIT_WORK(&adapter->reset_task, iavf_reset_task);
3786         INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
3787         INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
3788         INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
3789         INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
3790         queue_delayed_work(iavf_wq, &adapter->init_task,
3791                            msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
3792
3793         /* Setup the wait queue for indicating transition to down status */
3794         init_waitqueue_head(&adapter->down_waitqueue);
3795
3796         return 0;
3797
3798 err_ioremap:
3799         free_netdev(netdev);
3800 err_alloc_etherdev:
3801         pci_disable_pcie_error_reporting(pdev);
3802         pci_release_regions(pdev);
3803 err_pci_reg:
3804 err_dma:
3805         pci_disable_device(pdev);
3806         return err;
3807 }
3808
3809 /**
3810  * iavf_suspend - Power management suspend routine
3811  * @dev_d: device info pointer
3812  *
3813  * Called when the system (VM) is entering sleep/suspend.
3814  **/
3815 static int __maybe_unused iavf_suspend(struct device *dev_d)
3816 {
3817         struct net_device *netdev = dev_get_drvdata(dev_d);
3818         struct iavf_adapter *adapter = netdev_priv(netdev);
3819
3820         netif_device_detach(netdev);
3821
3822         while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
3823                                 &adapter->crit_section))
3824                 usleep_range(500, 1000);
3825
3826         if (netif_running(netdev)) {
3827                 rtnl_lock();
3828                 iavf_down(adapter);
3829                 rtnl_unlock();
3830         }
3831         iavf_free_misc_irq(adapter);
3832         iavf_reset_interrupt_capability(adapter);
3833
3834         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3835
3836         return 0;
3837 }
3838
3839 /**
3840  * iavf_resume - Power management resume routine
3841  * @dev_d: device info pointer
3842  *
3843  * Called when the system (VM) is resumed from sleep/suspend.
3844  **/
3845 static int __maybe_unused iavf_resume(struct device *dev_d)
3846 {
3847         struct pci_dev *pdev = to_pci_dev(dev_d);
3848         struct net_device *netdev = pci_get_drvdata(pdev);
3849         struct iavf_adapter *adapter = netdev_priv(netdev);
3850         int err;
3851
3852         pci_set_master(pdev);
3853
3854         rtnl_lock();
3855         err = iavf_set_interrupt_capability(adapter);
3856         if (err) {
3857                 rtnl_unlock();
3858                 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
3859                 return err;
3860         }
3861         err = iavf_request_misc_irq(adapter);
3862         rtnl_unlock();
3863         if (err) {
3864                 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
3865                 return err;
3866         }
3867
3868         queue_work(iavf_wq, &adapter->reset_task);
3869
3870         netif_device_attach(netdev);
3871
3872         return err;
3873 }
3874
3875 /**
3876  * iavf_remove - Device Removal Routine
3877  * @pdev: PCI device information struct
3878  *
3879  * iavf_remove is called by the PCI subsystem to alert the driver
3880  * that it should release a PCI device.  This could be caused by a
3881  * Hot-Plug event, or because the driver is going to be removed from
3882  * memory.
3883  **/
3884 static void iavf_remove(struct pci_dev *pdev)
3885 {
3886         struct net_device *netdev = pci_get_drvdata(pdev);
3887         struct iavf_adapter *adapter = netdev_priv(netdev);
3888         struct iavf_fdir_fltr *fdir, *fdirtmp;
3889         struct iavf_vlan_filter *vlf, *vlftmp;
3890         struct iavf_adv_rss *rss, *rsstmp;
3891         struct iavf_mac_filter *f, *ftmp;
3892         struct iavf_cloud_filter *cf, *cftmp;
3893         struct iavf_hw *hw = &adapter->hw;
3894         int err;
3895         /* Indicate we are in remove and not to run reset_task */
3896         set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
3897         cancel_delayed_work_sync(&adapter->init_task);
3898         cancel_work_sync(&adapter->reset_task);
3899         cancel_delayed_work_sync(&adapter->client_task);
3900         if (adapter->netdev_registered) {
3901                 unregister_netdev(netdev);
3902                 adapter->netdev_registered = false;
3903         }
3904         if (CLIENT_ALLOWED(adapter)) {
3905                 err = iavf_lan_del_device(adapter);
3906                 if (err)
3907                         dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
3908                                  err);
3909         }
3910
3911         /* Shut down all the garbage mashers on the detention level */
3912         adapter->state = __IAVF_REMOVE;
3913         adapter->aq_required = 0;
3914         adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3915         iavf_request_reset(adapter);
3916         msleep(50);
3917         /* If the FW isn't responding, kick it once, but only once. */
3918         if (!iavf_asq_done(hw)) {
3919                 iavf_request_reset(adapter);
3920                 msleep(50);
3921         }
3922         iavf_free_all_tx_resources(adapter);
3923         iavf_free_all_rx_resources(adapter);
3924         iavf_misc_irq_disable(adapter);
3925         iavf_free_misc_irq(adapter);
3926         iavf_reset_interrupt_capability(adapter);
3927         iavf_free_q_vectors(adapter);
3928
3929         cancel_delayed_work_sync(&adapter->watchdog_task);
3930
3931         cancel_work_sync(&adapter->adminq_task);
3932
3933         iavf_free_rss(adapter);
3934
3935         if (hw->aq.asq.count)
3936                 iavf_shutdown_adminq(hw);
3937
3938         /* destroy the locks only once, here */
3939         mutex_destroy(&hw->aq.arq_mutex);
3940         mutex_destroy(&hw->aq.asq_mutex);
3941
3942         iounmap(hw->hw_addr);
3943         pci_release_regions(pdev);
3944         iavf_free_queues(adapter);
3945         kfree(adapter->vf_res);
3946         spin_lock_bh(&adapter->mac_vlan_list_lock);
3947         /* If we got removed before an up/down sequence, we've got a filter
3948          * hanging out there that we need to get rid of.
3949          */
3950         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3951                 list_del(&f->list);
3952                 kfree(f);
3953         }
3954         list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
3955                                  list) {
3956                 list_del(&vlf->list);
3957                 kfree(vlf);
3958         }
3959
3960         spin_unlock_bh(&adapter->mac_vlan_list_lock);
3961
3962         spin_lock_bh(&adapter->cloud_filter_list_lock);
3963         list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
3964                 list_del(&cf->list);
3965                 kfree(cf);
3966         }
3967         spin_unlock_bh(&adapter->cloud_filter_list_lock);
3968
3969         spin_lock_bh(&adapter->fdir_fltr_lock);
3970         list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
3971                 list_del(&fdir->list);
3972                 kfree(fdir);
3973         }
3974         spin_unlock_bh(&adapter->fdir_fltr_lock);
3975
3976         spin_lock_bh(&adapter->adv_rss_lock);
3977         list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
3978                                  list) {
3979                 list_del(&rss->list);
3980                 kfree(rss);
3981         }
3982         spin_unlock_bh(&adapter->adv_rss_lock);
3983
3984         free_netdev(netdev);
3985
3986         pci_disable_pcie_error_reporting(pdev);
3987
3988         pci_disable_device(pdev);
3989 }
3990
3991 static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
3992
3993 static struct pci_driver iavf_driver = {
3994         .name      = iavf_driver_name,
3995         .id_table  = iavf_pci_tbl,
3996         .probe     = iavf_probe,
3997         .remove    = iavf_remove,
3998         .driver.pm = &iavf_pm_ops,
3999         .shutdown  = iavf_shutdown,
4000 };
4001
4002 /**
4003  * iavf_init_module - Driver Registration Routine
4004  *
4005  * iavf_init_module is the first routine called when the driver is
4006  * loaded. All it does is register with the PCI subsystem.
4007  **/
4008 static int __init iavf_init_module(void)
4009 {
4010         int ret;
4011
4012         pr_info("iavf: %s\n", iavf_driver_string);
4013
4014         pr_info("%s\n", iavf_copyright);
4015
4016         iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
4017                                   iavf_driver_name);
4018         if (!iavf_wq) {
4019                 pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
4020                 return -ENOMEM;
4021         }
4022         ret = pci_register_driver(&iavf_driver);
4023         return ret;
4024 }
4025
4026 module_init(iavf_init_module);
4027
4028 /**
4029  * iavf_exit_module - Driver Exit Cleanup Routine
4030  *
4031  * iavf_exit_module is called just before the driver is removed
4032  * from memory.
4033  **/
4034 static void __exit iavf_exit_module(void)
4035 {
4036         pci_unregister_driver(&iavf_driver);
4037         destroy_workqueue(iavf_wq);
4038 }
4039
4040 module_exit(iavf_exit_module);
4041
4042 /* iavf_main.c */