// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2009 - 2018 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/sctp.h>

#include "igbvf.h"

char igbvf_driver_name[] = "igbvf";
static const char igbvf_driver_string[] =
                  "Intel(R) Gigabit Virtual Function Network Driver";
static const char igbvf_copyright[] =
                  "Copyright (c) 2009 - 2012 Intel Corporation.";

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);

static struct igbvf_info igbvf_vf_info = {
        .mac            = e1000_vfadapt,
        .flags          = 0,
        .pba            = 10,
        .init_ops       = e1000_init_function_pointers_vf,
};

static struct igbvf_info igbvf_i350_vf_info = {
        .mac            = e1000_vfadapt_i350,
        .flags          = 0,
        .pba            = 10,
        .init_ops       = e1000_init_function_pointers_vf,
};

static const struct igbvf_info *igbvf_info_tbl[] = {
        [board_vf]      = &igbvf_vf_info,
        [board_i350_vf] = &igbvf_i350_vf_info,
};

/**
 * igbvf_desc_unused - calculate if we have unused descriptors
 * @ring: address of receive ring structure
 **/
static int igbvf_desc_unused(struct igbvf_ring *ring)
{
        if (ring->next_to_clean > ring->next_to_use)
                return ring->next_to_clean - ring->next_to_use - 1;

        return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
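
/* A worked example of the ring arithmetic above: with count = 256,
 * next_to_clean = 10 and next_to_use = 200, software may refill
 * 256 + 10 - 200 - 1 = 65 descriptors.  The "- 1" keeps next_to_use from
 * ever catching up to next_to_clean, so a completely full ring is never
 * confused with a completely empty one.
 */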

/**
 * igbvf_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to netdev struct
 * @skb: skb to indicate to stack
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
                              struct net_device *netdev,
                              struct sk_buff *skb,
                              u32 status, __le16 vlan)
{
        u16 vid;

        if (status & E1000_RXD_STAT_VP) {
                if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
                    (status & E1000_RXDEXT_STATERR_LB))
                        vid = be16_to_cpu((__force __be16)vlan) & E1000_RXD_SPC_VLAN_MASK;
                else
                        vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
                if (test_bit(vid, adapter->active_vlans))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
        }

        napi_gro_receive(&adapter->rx_ring->napi, skb);
}
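
/* Note on the byte swap above: when the IGBVF_FLAG_RX_LB_VLAN_BSWAP quirk
 * flag is set and the descriptor's loopback (LB) status bit is on, the
 * VLAN field written back by hardware is treated as big-endian rather
 * than the usual little-endian, hence the be16_to_cpu() in that branch.
 * This appears to apply only to locally looped-back (VM-to-VM) traffic;
 * normal wire traffic always takes the le16_to_cpu() path.
 */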

static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
                                         u32 status_err, struct sk_buff *skb)
{
        skb_checksum_none_assert(skb);

        /* Ignore Checksum bit is set or checksum is disabled through ethtool */
        if ((status_err & E1000_RXD_STAT_IXSM) ||
            (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
                return;

        /* TCP/UDP checksum error bit is set */
        if (status_err &
            (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
                /* let the stack verify checksum errors */
                adapter->hw_csum_err++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        adapter->hw_csum_good++;
}

/**
 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: address of ring structure to repopulate
 * @cleaned_count: number of buffers to repopulate
 **/
static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
                                   int cleaned_count)
{
        struct igbvf_adapter *adapter = rx_ring->adapter;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union e1000_adv_rx_desc *rx_desc;
        struct igbvf_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
        int bufsz;

        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];

        if (adapter->rx_ps_hdr_size)
                bufsz = adapter->rx_ps_hdr_size;
        else
                bufsz = adapter->rx_buffer_len;

        while (cleaned_count--) {
                rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);

                if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
                        if (!buffer_info->page) {
                                buffer_info->page = alloc_page(GFP_ATOMIC);
                                if (!buffer_info->page) {
                                        adapter->alloc_rx_buff_failed++;
                                        goto no_buffers;
                                }
                                buffer_info->page_offset = 0;
                        } else {
                                buffer_info->page_offset ^= PAGE_SIZE / 2;
                        }
                        buffer_info->page_dma =
                                dma_map_page(&pdev->dev, buffer_info->page,
                                             buffer_info->page_offset,
                                             PAGE_SIZE / 2,
                                             DMA_FROM_DEVICE);
                        if (dma_mapping_error(&pdev->dev,
                                              buffer_info->page_dma)) {
                                __free_page(buffer_info->page);
                                buffer_info->page = NULL;
                                dev_err(&pdev->dev, "RX DMA map failed\n");
                                break;
                        }
                }

                if (!buffer_info->skb) {
                        skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }

                        buffer_info->skb = skb;
                        buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
                                                          bufsz,
                                                          DMA_FROM_DEVICE);
                        if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
                                dev_kfree_skb(buffer_info->skb);
                                buffer_info->skb = NULL;
                                dev_err(&pdev->dev, "RX DMA map failed\n");
                                goto no_buffers;
                        }
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info.
                 */
                if (adapter->rx_ps_hdr_size) {
                        rx_desc->read.pkt_addr =
                             cpu_to_le64(buffer_info->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
                } else {
                        rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
                        rx_desc->read.hdr_addr = 0;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }

no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
                if (i == 0)
                        i = (rx_ring->count - 1);
                else
                        i--;

                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                writel(i, adapter->hw.hw_addr + rx_ring->tail);
        }
}
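
/* Note on the tail write above: next_to_use is advanced to the next slot
 * the driver will fill, but the value written to the tail register is
 * backed up by one so that it points at the last descriptor that was
 * actually initialized.  This presumably keeps hardware from fetching a
 * descriptor the driver has not yet refreshed.
 */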

/**
 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @work_done: output parameter used to indicate completed work
 * @work_to_do: input parameter setting limit of work
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
                               int *work_done, int work_to_do)
{
        struct igbvf_ring *rx_ring = adapter->rx_ring;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union e1000_adv_rx_desc *rx_desc, *next_rxd;
        struct igbvf_buffer *buffer_info, *next_buffer;
        struct sk_buff *skb;
        bool cleaned = false;
        int cleaned_count = 0;
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int i;
        u32 length, hlen, staterr;

        i = rx_ring->next_to_clean;
        rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

        while (staterr & E1000_RXD_STAT_DD) {
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;
                rmb(); /* read descriptor and rx_buffer_info after status DD */

                buffer_info = &rx_ring->buffer_info[i];

                /* HW will not DMA in data larger than the given buffer, even
                 * if it parses the (NFS, of course) header to be larger.  In
                 * that case, it fills the header buffer and spills the rest
                 * into the page.
                 */
                hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info)
                       & E1000_RXDADV_HDRBUFLEN_MASK) >>
                       E1000_RXDADV_HDRBUFLEN_SHIFT;
                if (hlen > adapter->rx_ps_hdr_size)
                        hlen = adapter->rx_ps_hdr_size;

                length = le16_to_cpu(rx_desc->wb.upper.length);
                cleaned = true;
                cleaned_count++;

                skb = buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                buffer_info->skb = NULL;
                if (!adapter->rx_ps_hdr_size) {
                        dma_unmap_single(&pdev->dev, buffer_info->dma,
                                         adapter->rx_buffer_len,
                                         DMA_FROM_DEVICE);
                        buffer_info->dma = 0;
                        skb_put(skb, length);
                        goto send_up;
                }

                if (!skb_shinfo(skb)->nr_frags) {
                        dma_unmap_single(&pdev->dev, buffer_info->dma,
                                         adapter->rx_ps_hdr_size,
                                         DMA_FROM_DEVICE);
                        buffer_info->dma = 0;
                        skb_put(skb, hlen);
                }

                if (length) {
                        dma_unmap_page(&pdev->dev, buffer_info->page_dma,
                                       PAGE_SIZE / 2,
                                       DMA_FROM_DEVICE);
                        buffer_info->page_dma = 0;

                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           buffer_info->page,
                                           buffer_info->page_offset,
                                           length);

                        if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
                            (page_count(buffer_info->page) != 1))
                                buffer_info->page = NULL;
                        else
                                get_page(buffer_info->page);

                        skb->len += length;
                        skb->data_len += length;
                        skb->truesize += PAGE_SIZE / 2;
                }
send_up:
                i++;
                if (i == rx_ring->count)
                        i = 0;
                next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
                prefetch(next_rxd);
                next_buffer = &rx_ring->buffer_info[i];

                if (!(staterr & E1000_RXD_STAT_EOP)) {
                        buffer_info->skb = next_buffer->skb;
                        buffer_info->dma = next_buffer->dma;
                        next_buffer->skb = skb;
                        next_buffer->dma = 0;
                        goto next_desc;
                }

                if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                total_bytes += skb->len;
                total_packets++;

                igbvf_rx_checksum_adv(adapter, staterr, skb);

                skb->protocol = eth_type_trans(skb, netdev);

                igbvf_receive_skb(adapter, netdev, skb, staterr,
                                  rx_desc->wb.upper.vlan);

next_desc:
                rx_desc->wb.upper.status_error = 0;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
                        igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                buffer_info = next_buffer;

                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }

        rx_ring->next_to_clean = i;
        cleaned_count = igbvf_desc_unused(rx_ring);

        if (cleaned_count)
                igbvf_alloc_rx_buffers(rx_ring, cleaned_count);

        adapter->total_rx_packets += total_packets;
        adapter->total_rx_bytes += total_bytes;
        netdev->stats.rx_bytes += total_bytes;
        netdev->stats.rx_packets += total_packets;
        return cleaned;
}
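
/* Receive path summary (packet-split mode): when rx_ps_hdr_size is
 * nonzero, hardware splits each frame, DMA-ing up to rx_ps_hdr_size
 * bytes of headers into the small skb buffer (hdr_addr) and the payload
 * into a half-page fragment (pkt_addr).  The loop above re-assembles the
 * two via skb_fill_page_desc(), and chains buffers across descriptors
 * until it sees a descriptor with the EOP bit set.
 */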

static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
                            struct igbvf_buffer *buffer_info)
{
        if (buffer_info->dma) {
                if (buffer_info->mapped_as_page)
                        dma_unmap_page(&adapter->pdev->dev,
                                       buffer_info->dma,
                                       buffer_info->length,
                                       DMA_TO_DEVICE);
                else
                        dma_unmap_single(&adapter->pdev->dev,
                                         buffer_info->dma,
                                         buffer_info->length,
                                         DMA_TO_DEVICE);
                buffer_info->dma = 0;
        }
        if (buffer_info->skb) {
                dev_kfree_skb_any(buffer_info->skb);
                buffer_info->skb = NULL;
        }
        buffer_info->time_stamp = 0;
}

/**
 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: ring being initialized
 *
 * Return 0 on success, negative on failure
 **/
int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
                             struct igbvf_ring *tx_ring)
{
        struct pci_dev *pdev = adapter->pdev;
        int size;

        size = sizeof(struct igbvf_buffer) * tx_ring->count;
        tx_ring->buffer_info = vzalloc(size);
        if (!tx_ring->buffer_info)
                goto err;

        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);

        tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
                                           &tx_ring->dma, GFP_KERNEL);
        if (!tx_ring->desc)
                goto err;

        tx_ring->adapter = adapter;
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        return 0;
err:
        vfree(tx_ring->buffer_info);
        dev_err(&adapter->pdev->dev,
                "Unable to allocate memory for the transmit descriptor ring\n");
        return -ENOMEM;
}

/**
 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: ring being initialized
 *
 * Returns 0 on success, negative on failure
 **/
int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
                             struct igbvf_ring *rx_ring)
{
        struct pci_dev *pdev = adapter->pdev;
        int size, desc_len;

        size = sizeof(struct igbvf_buffer) * rx_ring->count;
        rx_ring->buffer_info = vzalloc(size);
        if (!rx_ring->buffer_info)
                goto err;

        desc_len = sizeof(union e1000_adv_rx_desc);

        /* Round up to nearest 4K */
        rx_ring->size = rx_ring->count * desc_len;
        rx_ring->size = ALIGN(rx_ring->size, 4096);

        rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
        if (!rx_ring->desc)
                goto err;

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;

        rx_ring->adapter = adapter;

        return 0;

err:
        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;
        dev_err(&adapter->pdev->dev,
                "Unable to allocate memory for the receive descriptor ring\n");
        return -ENOMEM;
}

/**
 * igbvf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
{
        struct igbvf_adapter *adapter = tx_ring->adapter;
        struct igbvf_buffer *buffer_info;
        unsigned long size;
        unsigned int i;

        if (!tx_ring->buffer_info)
                return;

        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tx_ring->count; i++) {
                buffer_info = &tx_ring->buffer_info[i];
                igbvf_put_txbuf(adapter, buffer_info);
        }

        size = sizeof(struct igbvf_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(tx_ring->desc, 0, tx_ring->size);

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        writel(0, adapter->hw.hw_addr + tx_ring->head);
        writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * igbvf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: ring to free resources from
 *
 * Free all transmit software resources
 **/
void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
{
        struct pci_dev *pdev = tx_ring->adapter->pdev;

        igbvf_clean_tx_ring(tx_ring);

        vfree(tx_ring->buffer_info);
        tx_ring->buffer_info = NULL;

        dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
                          tx_ring->dma);

        tx_ring->desc = NULL;
}

/**
 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring structure pointer to free buffers from
 **/
static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
{
        struct igbvf_adapter *adapter = rx_ring->adapter;
        struct igbvf_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        if (!rx_ring->buffer_info)
                return;

        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
                if (buffer_info->dma) {
                        if (adapter->rx_ps_hdr_size) {
                                dma_unmap_single(&pdev->dev, buffer_info->dma,
                                                 adapter->rx_ps_hdr_size,
                                                 DMA_FROM_DEVICE);
                        } else {
                                dma_unmap_single(&pdev->dev, buffer_info->dma,
                                                 adapter->rx_buffer_len,
                                                 DMA_FROM_DEVICE);
                        }
                        buffer_info->dma = 0;
                }

                if (buffer_info->skb) {
                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;
                }

                if (buffer_info->page) {
                        if (buffer_info->page_dma)
                                dma_unmap_page(&pdev->dev,
                                               buffer_info->page_dma,
                                               PAGE_SIZE / 2,
                                               DMA_FROM_DEVICE);
                        put_page(buffer_info->page);
                        buffer_info->page = NULL;
                        buffer_info->page_dma = 0;
                        buffer_info->page_offset = 0;
                }
        }

        size = sizeof(struct igbvf_buffer) * rx_ring->count;
        memset(rx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;

        writel(0, adapter->hw.hw_addr + rx_ring->head);
        writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * igbvf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
{
        struct pci_dev *pdev = rx_ring->adapter->pdev;

        igbvf_clean_rx_ring(rx_ring);

        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;

        dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
                          rx_ring->dma);
        rx_ring->desc = NULL;
}

/**
 * igbvf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt.  The advantage of per interrupt computation is faster updates
 * and more accurate ITR for the current traffic pattern.  Constants in this
 * function were computed based on theoretical maximum wire speed and thresholds
 * were set based on testing data as well as attempting to minimize response
 * time while increasing bulk throughput.
 **/
static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
                                           enum latency_range itr_setting,
                                           int packets, int bytes)
{
        enum latency_range retval = itr_setting;

        if (packets == 0)
                goto update_itr_done;

        switch (itr_setting) {
        case lowest_latency:
                /* handle TSO and jumbo frames */
                if (bytes/packets > 8000)
                        retval = bulk_latency;
                else if ((packets < 5) && (bytes > 512))
                        retval = low_latency;
                break;
        case low_latency:  /* 50 usec aka 20000 ints/s */
                if (bytes > 10000) {
                        /* this if handles the TSO accounting */
                        if (bytes/packets > 8000)
                                retval = bulk_latency;
                        else if ((packets < 10) || ((bytes/packets) > 1200))
                                retval = bulk_latency;
                        else if (packets > 35)
                                retval = lowest_latency;
                } else if (bytes/packets > 2000) {
                        retval = bulk_latency;
                } else if (packets <= 2 && bytes < 512) {
                        retval = lowest_latency;
                }
                break;
        case bulk_latency: /* 250 usec aka 4000 ints/s */
                if (bytes > 25000) {
                        if (packets > 35)
                                retval = low_latency;
                } else if (bytes < 6000) {
                        retval = low_latency;
                }
                break;
        default:
                break;
        }

update_itr_done:
        return retval;
}
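
/* A worked example of the thresholds above: 40 packets totalling 10240
 * bytes in one interval while in low_latency gives bytes > 10000,
 * bytes/packets of 256 (not a TSO-like burst), and packets > 35, so the
 * range steps to lowest_latency; the same byte count spread over only 9
 * packets would instead step to bulk_latency via the (packets < 10) test.
 */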

static int igbvf_range_to_itr(enum latency_range current_range)
{
        int new_itr;

        switch (current_range) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = IGBVF_70K_ITR;
                break;
        case low_latency:
                new_itr = IGBVF_20K_ITR;
                break;
        case bulk_latency:
                new_itr = IGBVF_4K_ITR;
                break;
        default:
                new_itr = IGBVF_START_ITR;
                break;
        }
        return new_itr;
}

static void igbvf_set_itr(struct igbvf_adapter *adapter)
{
        u32 new_itr;

        adapter->tx_ring->itr_range =
                        igbvf_update_itr(adapter,
                                         adapter->tx_ring->itr_val,
                                         adapter->total_tx_packets,
                                         adapter->total_tx_bytes);

        /* conservative mode (itr 3) eliminates the lowest_latency setting */
        if (adapter->requested_itr == 3 &&
            adapter->tx_ring->itr_range == lowest_latency)
                adapter->tx_ring->itr_range = low_latency;

        new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);

        if (new_itr != adapter->tx_ring->itr_val) {
                u32 current_itr = adapter->tx_ring->itr_val;
                /* this attempts to bias the interrupt rate towards Bulk
                 * by adding intermediate steps when interrupt rate is
                 * increasing
                 */
                new_itr = new_itr > current_itr ?
                          min(current_itr + (new_itr >> 2), new_itr) :
                          new_itr;
                adapter->tx_ring->itr_val = new_itr;

                adapter->tx_ring->set_itr = 1;
        }

        adapter->rx_ring->itr_range =
                        igbvf_update_itr(adapter, adapter->rx_ring->itr_val,
                                         adapter->total_rx_packets,
                                         adapter->total_rx_bytes);
        if (adapter->requested_itr == 3 &&
            adapter->rx_ring->itr_range == lowest_latency)
                adapter->rx_ring->itr_range = low_latency;

        new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range);

        if (new_itr != adapter->rx_ring->itr_val) {
                u32 current_itr = adapter->rx_ring->itr_val;

                new_itr = new_itr > current_itr ?
                          min(current_itr + (new_itr >> 2), new_itr) :
                          new_itr;
                adapter->rx_ring->itr_val = new_itr;

                adapter->rx_ring->set_itr = 1;
        }
}
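
/* Note on the smoothing above: when the new target itr_val is larger than
 * the current one, the update is clamped to min(current + target/4, target),
 * so the register climbs in steps of a quarter of the target.  E.g. moving
 * from 200 to 800 takes three writes: 200 -> 400 -> 600 -> 800.  Decreases
 * are applied in a single step.
 */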

/**
 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: ring structure to clean descriptors from
 *
 * returns true if ring is completely cleaned
 **/
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
{
        struct igbvf_adapter *adapter = tx_ring->adapter;
        struct net_device *netdev = adapter->netdev;
        struct igbvf_buffer *buffer_info;
        struct sk_buff *skb;
        union e1000_adv_tx_desc *tx_desc, *eop_desc;
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int i, count = 0;
        bool cleaned = false;

        i = tx_ring->next_to_clean;
        buffer_info = &tx_ring->buffer_info[i];
        eop_desc = buffer_info->next_to_watch;

        do {
                /* if next_to_watch is not set then there is no work pending */
                if (!eop_desc)
                        break;

                /* prevent any other reads prior to eop_desc */
                smp_rmb();

                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
                        break;

                /* clear next_to_watch to prevent false hangs */
                buffer_info->next_to_watch = NULL;

                for (cleaned = false; !cleaned; count++) {
                        tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
                        cleaned = (tx_desc == eop_desc);
                        skb = buffer_info->skb;

                        if (skb) {
                                unsigned int segs, bytecount;

                                /* gso_segs is currently only valid for tcp */
                                segs = skb_shinfo(skb)->gso_segs ?: 1;
                                /* multiply data chunks by size of headers */
                                bytecount = ((segs - 1) * skb_headlen(skb)) +
                                            skb->len;
                                total_packets += segs;
                                total_bytes += bytecount;
                        }

                        igbvf_put_txbuf(adapter, buffer_info);
                        tx_desc->wb.status = 0;

                        i++;
                        if (i == tx_ring->count)
                                i = 0;

                        buffer_info = &tx_ring->buffer_info[i];
                }

                eop_desc = buffer_info->next_to_watch;
        } while (count < tx_ring->count);

        tx_ring->next_to_clean = i;

        if (unlikely(count && netif_carrier_ok(netdev) &&
            igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (netif_queue_stopped(netdev) &&
                    !(test_bit(__IGBVF_DOWN, &adapter->state))) {
                        netif_wake_queue(netdev);
                        ++adapter->restart_queue;
                }
        }

        netdev->stats.tx_bytes += total_bytes;
        netdev->stats.tx_packets += total_packets;
        return count < tx_ring->count;
}
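
/* Note on the wake-up race above: the transmit path stops the queue when
 * descriptors run low, so the smp_mb() before the netif_queue_stopped()
 * check orders the next_to_clean update against the stop-queue test in
 * the xmit path.  Without it, both sides could each observe the other's
 * stale state and the queue could stay stopped with a clean ring.
 */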

static irqreturn_t igbvf_msix_other(int irq, void *data)
{
        struct net_device *netdev = data;
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;

        adapter->int_counter1++;

        hw->mac.get_link_status = 1;
        if (!test_bit(__IGBVF_DOWN, &adapter->state))
                mod_timer(&adapter->watchdog_timer, jiffies + 1);

        ew32(EIMS, adapter->eims_other);

        return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
{
        struct net_device *netdev = data;
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        struct igbvf_ring *tx_ring = adapter->tx_ring;

        if (tx_ring->set_itr) {
                writel(tx_ring->itr_val,
                       adapter->hw.hw_addr + tx_ring->itr_register);
                adapter->tx_ring->set_itr = 0;
        }

        adapter->total_tx_bytes = 0;
        adapter->total_tx_packets = 0;

        /* auto mask will automatically re-enable the interrupt when we write
         * EICS
         */
        if (!igbvf_clean_tx_irq(tx_ring))
                /* Ring was not completely cleaned, so fire another interrupt */
                ew32(EICS, tx_ring->eims_value);
        else
                ew32(EIMS, tx_ring->eims_value);

        return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
{
        struct net_device *netdev = data;
        struct igbvf_adapter *adapter = netdev_priv(netdev);

        adapter->int_counter0++;

        /* Write the ITR value calculated at the end of the
         * previous interrupt.
         */
        if (adapter->rx_ring->set_itr) {
                writel(adapter->rx_ring->itr_val,
                       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
                adapter->rx_ring->set_itr = 0;
        }

        if (napi_schedule_prep(&adapter->rx_ring->napi)) {
                adapter->total_rx_bytes = 0;
                adapter->total_rx_packets = 0;
                __napi_schedule(&adapter->rx_ring->napi);
        }

        return IRQ_HANDLED;
}

#define IGBVF_NO_QUEUE -1

static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
                                int tx_queue, int msix_vector)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ivar, index;

        /* 82576 uses a table-based method for assigning vectors.
         * Each queue has a single entry in the table to which we write
         * a vector number along with a "valid" bit.  Sadly, the layout
         * of the table is somewhat counterintuitive.
         */
        if (rx_queue > IGBVF_NO_QUEUE) {
                index = (rx_queue >> 1);
                ivar = array_er32(IVAR0, index);
                if (rx_queue & 0x1) {
                        /* vector goes into third byte of register */
                        ivar = ivar & 0xFF00FFFF;
                        ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
                } else {
                        /* vector goes into low byte of register */
                        ivar = ivar & 0xFFFFFF00;
                        ivar |= msix_vector | E1000_IVAR_VALID;
                }
                adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector);
                array_ew32(IVAR0, index, ivar);
        }
        if (tx_queue > IGBVF_NO_QUEUE) {
                index = (tx_queue >> 1);
                ivar = array_er32(IVAR0, index);
                if (tx_queue & 0x1) {
                        /* vector goes into high byte of register */
                        ivar = ivar & 0x00FFFFFF;
                        ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
                } else {
                        /* vector goes into second byte of register */
                        ivar = ivar & 0xFFFF00FF;
                        ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
                }
                adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector);
                array_ew32(IVAR0, index, ivar);
        }
}
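
/* IVAR0 table layout implied by the masks and shifts above: each 32-bit
 * entry covers a pair of queues, one byte per queue/direction, and each
 * byte holds the MSI-X vector number ORed with E1000_IVAR_VALID:
 *
 *   bits  7:0   Rx queue (2 * index)
 *   bits 15:8   Tx queue (2 * index)
 *   bits 23:16  Rx queue (2 * index + 1)
 *   bits 31:24  Tx queue (2 * index + 1)
 */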

/**
 * igbvf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * igbvf_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igbvf_configure_msix(struct igbvf_adapter *adapter)
{
        u32 tmp;
        struct e1000_hw *hw = &adapter->hw;
        struct igbvf_ring *tx_ring = adapter->tx_ring;
        struct igbvf_ring *rx_ring = adapter->rx_ring;
        int vector = 0;

        adapter->eims_enable_mask = 0;

        igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
        adapter->eims_enable_mask |= tx_ring->eims_value;
        writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register);
        igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
        adapter->eims_enable_mask |= rx_ring->eims_value;
        writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register);

        /* set vector for other causes, i.e. link changes */

        tmp = (vector++ | E1000_IVAR_VALID);

        ew32(IVAR_MISC, tmp);

        adapter->eims_enable_mask = GENMASK(vector - 1, 0);
        adapter->eims_other = BIT(vector - 1);
        e1e_flush();
}
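
/* Vector layout resulting from igbvf_configure_msix(): vector 0 is Tx,
 * vector 1 is Rx, and vector 2 handles "other" causes such as link
 * changes, so eims_enable_mask ends up as GENMASK(2, 0) = 0x7 and
 * eims_other as BIT(2) = 0x4.
 */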

static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
{
        if (adapter->msix_entries) {
                pci_disable_msix(adapter->pdev);
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
        }
}

/**
 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
{
        int err = -ENOMEM;
        int i;

        /* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */
        adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
                                        GFP_KERNEL);
        if (adapter->msix_entries) {
                for (i = 0; i < 3; i++)
                        adapter->msix_entries[i].entry = i;

                err = pci_enable_msix_range(adapter->pdev,
                                            adapter->msix_entries, 3, 3);
        }

        if (err < 0) {
                /* MSI-X failed */
                dev_err(&adapter->pdev->dev,
                        "Failed to initialize MSI-X interrupts.\n");
                igbvf_reset_interrupt_capability(adapter);
        }
}

/**
 * igbvf_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igbvf_request_msix(struct igbvf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err = 0, vector = 0;

        if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
                sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
                sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
        } else {
                memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
                memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
        }

        err = request_irq(adapter->msix_entries[vector].vector,
                          igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
                          netdev);
        if (err)
                goto out;

        adapter->tx_ring->itr_register = E1000_EITR(vector);
        adapter->tx_ring->itr_val = adapter->current_itr;
        vector++;

        err = request_irq(adapter->msix_entries[vector].vector,
                          igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
                          netdev);
        if (err)
                goto out;

        adapter->rx_ring->itr_register = E1000_EITR(vector);
        adapter->rx_ring->itr_val = adapter->current_itr;
        vector++;

        err = request_irq(adapter->msix_entries[vector].vector,
                          igbvf_msix_other, 0, netdev->name, netdev);
        if (err)
                goto out;

        igbvf_configure_msix(adapter);
        return 0;
out:
        return err;
}

/**
 * igbvf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 **/
static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
                return -ENOMEM;

        adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
        if (!adapter->rx_ring) {
                kfree(adapter->tx_ring);
                return -ENOMEM;
        }

        netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);

        return 0;
}

/**
 * igbvf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igbvf_request_irq(struct igbvf_adapter *adapter)
{
        int err = -1;

        /* igbvf supports msi-x only */
        if (adapter->msix_entries)
                err = igbvf_request_msix(adapter);

        if (!err)
                return err;

        dev_err(&adapter->pdev->dev,
                "Unable to allocate interrupt, Error: %d\n", err);

        return err;
}

static void igbvf_free_irq(struct igbvf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int vector;

        if (adapter->msix_entries) {
                for (vector = 0; vector < 3; vector++)
                        free_irq(adapter->msix_entries[vector].vector, netdev);
        }
}

/**
 * igbvf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igbvf_irq_disable(struct igbvf_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        ew32(EIMC, ~0);

        if (adapter->msix_entries)
                ew32(EIAC, 0);
}

/**
 * igbvf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igbvf_irq_enable(struct igbvf_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        ew32(EIAC, adapter->eims_enable_mask);
        ew32(EIAM, adapter->eims_enable_mask);
        ew32(EIMS, adapter->eims_enable_mask);
}

/**
 * igbvf_poll - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: number of packets the driver may process during this poll
 **/
static int igbvf_poll(struct napi_struct *napi, int budget)
{
        struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
        struct igbvf_adapter *adapter = rx_ring->adapter;
        struct e1000_hw *hw = &adapter->hw;
        int work_done = 0;

        igbvf_clean_rx_irq(adapter, &work_done, budget);

        if (work_done == budget)
                return budget;

        /* Exit the polling mode, but don't re-enable interrupts if stack might
         * poll us due to busy-polling
         */
        if (likely(napi_complete_done(napi, work_done))) {
                if (adapter->requested_itr & 3)
                        igbvf_set_itr(adapter);

                if (!test_bit(__IGBVF_DOWN, &adapter->state))
                        ew32(EIMS, adapter->rx_ring->eims_value);
        }

        return work_done;
}

/**
 * igbvf_set_rlpml - set receive large packet maximum length
 * @adapter: board private structure
 *
 * Configure the maximum size of packets that will be received
 */
static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
{
        int max_frame_size;
        struct e1000_hw *hw = &adapter->hw;

        max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;

        spin_lock_bh(&hw->mbx_lock);

        e1000_rlpml_set_vf(hw, max_frame_size);

        spin_unlock_bh(&hw->mbx_lock);
}

static int igbvf_vlan_rx_add_vid(struct net_device *netdev,
                                 __be16 proto, u16 vid)
{
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;

        spin_lock_bh(&hw->mbx_lock);

        if (hw->mac.ops.set_vfta(hw, vid, true)) {
                dev_warn(&adapter->pdev->dev, "VLAN id %d is not added\n", vid);
                spin_unlock_bh(&hw->mbx_lock);
                return -EINVAL;
        }

        spin_unlock_bh(&hw->mbx_lock);

        set_bit(vid, adapter->active_vlans);
        return 0;
}

static int igbvf_vlan_rx_kill_vid(struct net_device *netdev,
                                  __be16 proto, u16 vid)
{
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;

        spin_lock_bh(&hw->mbx_lock);

        if (hw->mac.ops.set_vfta(hw, vid, false)) {
                dev_err(&adapter->pdev->dev,
                        "Failed to remove VLAN id %d\n", vid);
                spin_unlock_bh(&hw->mbx_lock);
                return -EINVAL;
        }

        spin_unlock_bh(&hw->mbx_lock);

        clear_bit(vid, adapter->active_vlans);
        return 0;
}

static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
{
        u16 vid;

        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
                igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

/**
 * igbvf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igbvf_configure_tx(struct igbvf_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct igbvf_ring *tx_ring = adapter->tx_ring;
        u64 tdba;
        u32 txdctl, dca_txctrl;

        /* disable transmits */
        txdctl = er32(TXDCTL(0));
        ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
        e1e_flush();
        msleep(10);

        /* Setup the HW Tx Head and Tail descriptor pointers */
        ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
        tdba = tx_ring->dma;
        ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
        ew32(TDBAH(0), (tdba >> 32));
        ew32(TDH(0), 0);
        ew32(TDT(0), 0);
        tx_ring->head = E1000_TDH(0);
        tx_ring->tail = E1000_TDT(0);

        /* Turn off Relaxed Ordering on head write-backs.  The writebacks
         * MUST be delivered in order or it will completely screw up
         * our bookkeeping.
         */
        dca_txctrl = er32(DCA_TXCTRL(0));
        dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
        ew32(DCA_TXCTRL(0), dca_txctrl);

        /* enable transmits */
        txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
        ew32(TXDCTL(0), txdctl);

        /* Setup Transmit Descriptor Settings for eop descriptor */
        adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;

        /* enable Report Status bit */
        adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
}

/**
 * igbvf_setup_srrctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 srrctl = 0;

        srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
                    E1000_SRRCTL_BSIZEHDR_MASK |
                    E1000_SRRCTL_BSIZEPKT_MASK);

        /* Enable queue drop to avoid head of line blocking */
        srrctl |= E1000_SRRCTL_DROP_EN;

        /* Setup buffer sizes */
        srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
                  E1000_SRRCTL_BSIZEPKT_SHIFT;

        if (adapter->rx_buffer_len < 2048) {
                adapter->rx_ps_hdr_size = 0;
                srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
        } else {
                adapter->rx_ps_hdr_size = 128;
                srrctl |= adapter->rx_ps_hdr_size <<
                          E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
                srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
        }

        ew32(SRRCTL(0), srrctl);
}
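
/* Note on the SRRCTL encoding above: the packet buffer size field is
 * written as ALIGN(rx_buffer_len, 1024) >> E1000_SRRCTL_BSIZEPKT_SHIFT,
 * i.e. the hardware expresses it in 1 KB units (assuming the usual shift
 * of 10 in the e1000 register layout).  Buffers of 2 KB or more switch
 * the queue into header-split mode with a 128-byte header buffer.
 */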
1359
1360 /**
1361  * igbvf_configure_rx - Configure Receive Unit after Reset
1362  * @adapter: board private structure
1363  *
1364  * Configure the Rx unit of the MAC after a reset.
1365  **/
1366 static void igbvf_configure_rx(struct igbvf_adapter *adapter)
1367 {
1368         struct e1000_hw *hw = &adapter->hw;
1369         struct igbvf_ring *rx_ring = adapter->rx_ring;
1370         u64 rdba;
1371         u32 rxdctl;
1372
1373         /* disable receives */
1374         rxdctl = er32(RXDCTL(0));
1375         ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
1376         e1e_flush();
1377         msleep(10);
1378
1379         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1380          * the Base and Length of the Rx Descriptor Ring
1381          */
1382         rdba = rx_ring->dma;
1383         ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
1384         ew32(RDBAH(0), (rdba >> 32));
1385         ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
1386         rx_ring->head = E1000_RDH(0);
1387         rx_ring->tail = E1000_RDT(0);
1388         ew32(RDH(0), 0);
1389         ew32(RDT(0), 0);
1390
1391         rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1392         rxdctl &= 0xFFF00000;
1393         rxdctl |= IGBVF_RX_PTHRESH;
1394         rxdctl |= IGBVF_RX_HTHRESH << 8;
1395         rxdctl |= IGBVF_RX_WTHRESH << 16;
1396
1397         igbvf_set_rlpml(adapter);
1398
1399         /* enable receives */
1400         ew32(RXDCTL(0), rxdctl);
1401 }
1402
1403 /**
1404  * igbvf_set_multi - Multicast and Promiscuous mode set
1405  * @netdev: network interface device structure
1406  *
1407  * The set_multi entry point is called whenever the multicast address
1408  * list or the network interface flags are updated.  This routine is
1409  * responsible for configuring the hardware for proper multicast,
1410  * promiscuous mode, and all-multi behavior.
1411  **/
1412 static void igbvf_set_multi(struct net_device *netdev)
1413 {
1414         struct igbvf_adapter *adapter = netdev_priv(netdev);
1415         struct e1000_hw *hw = &adapter->hw;
1416         struct netdev_hw_addr *ha;
1417         u8  *mta_list = NULL;
1418         int i;
1419
1420         if (!netdev_mc_empty(netdev)) {
1421                 mta_list = kmalloc_array(netdev_mc_count(netdev), ETH_ALEN,
1422                                          GFP_ATOMIC);
1423                 if (!mta_list)
1424                         return;
1425         }
1426
1427         /* prepare a packed array of only addresses. */
1428         i = 0;
1429         netdev_for_each_mc_addr(ha, netdev)
1430                 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
1431
1432         spin_lock_bh(&hw->mbx_lock);
1433
1434         hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
1435
1436         spin_unlock_bh(&hw->mbx_lock);
1437         kfree(mta_list);
1438 }
1439
1440 /**
1441  * igbvf_set_uni - Configure unicast MAC filters
1442  * @netdev: network interface device structure
1443  *
1444  * This routine is responsible for configuring the hardware for proper
1445  * unicast filters.
1446  **/
1447 static int igbvf_set_uni(struct net_device *netdev)
1448 {
1449         struct igbvf_adapter *adapter = netdev_priv(netdev);
1450         struct e1000_hw *hw = &adapter->hw;
1451
1452         if (netdev_uc_count(netdev) > IGBVF_MAX_MAC_FILTERS) {
1453                 pr_err("Too many unicast filters - No Space\n");
1454                 return -ENOSPC;
1455         }
1456
1457         spin_lock_bh(&hw->mbx_lock);
1458
1459         /* Clear all unicast MAC filters */
1460         hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_CLR, NULL);
1461
1462         spin_unlock_bh(&hw->mbx_lock);
1463
1464         if (!netdev_uc_empty(netdev)) {
1465                 struct netdev_hw_addr *ha;
1466
1467                 /* Add MAC filters one by one */
1468                 netdev_for_each_uc_addr(ha, netdev) {
1469                         spin_lock_bh(&hw->mbx_lock);
1470
1471                         hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_ADD,
1472                                                 ha->addr);
1473
1474                         spin_unlock_bh(&hw->mbx_lock);
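                        /* brief pause between mailbox requests to give
                         * the PF time to respond (a conservative
                         * heuristic, not a documented requirement)
                         */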
1475                         udelay(200);
1476                 }
1477         }
1478
1479         return 0;
1480 }
1481
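/**
 * igbvf_set_rx_mode - program multicast and unicast filters
 * @netdev: network interface device structure
 **/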
1482 static void igbvf_set_rx_mode(struct net_device *netdev)
1483 {
1484         igbvf_set_multi(netdev);
1485         igbvf_set_uni(netdev);
1486 }
1487
1488 /**
1489  * igbvf_configure - configure the hardware for Rx and Tx
1490  * @adapter: private board structure
1491  **/
1492 static void igbvf_configure(struct igbvf_adapter *adapter)
1493 {
1494         igbvf_set_rx_mode(adapter->netdev);
1495
1496         igbvf_restore_vlan(adapter);
1497
1498         igbvf_configure_tx(adapter);
1499         igbvf_setup_srrctl(adapter);
1500         igbvf_configure_rx(adapter);
1501         igbvf_alloc_rx_buffers(adapter->rx_ring,
1502                                igbvf_desc_unused(adapter->rx_ring));
1503 }
1504
1505 /**
 * igbvf_reset - bring the hardware into a known good state
1506  * @adapter: private board structure
1507  *
1508  * This function boots the hardware and enables some settings that
1509  * require a configuration cycle of the hardware - those cannot be
1510  * set/changed during runtime. After reset the device needs to be
1511  * properly configured for Rx, Tx etc.
1512  **/
1513 static void igbvf_reset(struct igbvf_adapter *adapter)
1514 {
1515         struct e1000_mac_info *mac = &adapter->hw.mac;
1516         struct net_device *netdev = adapter->netdev;
1517         struct e1000_hw *hw = &adapter->hw;
1518
1519         spin_lock_bh(&hw->mbx_lock);
1520
1521         /* Allow time for pending master requests to run */
1522         if (mac->ops.reset_hw(hw))
1523                 dev_warn(&adapter->pdev->dev, "PF still resetting\n");
1524
1525         mac->ops.init_hw(hw);
1526
1527         spin_unlock_bh(&hw->mbx_lock);
1528
1529         if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1530                 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1531                        netdev->addr_len);
1532                 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1533                        netdev->addr_len);
1534         }
1535
1536         adapter->last_reset = jiffies;
1537 }
1538
1539 int igbvf_up(struct igbvf_adapter *adapter)
1540 {
1541         struct e1000_hw *hw = &adapter->hw;
1542
1543         /* hardware has been reset, we need to reload some things */
1544         igbvf_configure(adapter);
1545
1546         clear_bit(__IGBVF_DOWN, &adapter->state);
1547
1548         napi_enable(&adapter->rx_ring->napi);
1549         if (adapter->msix_entries)
1550                 igbvf_configure_msix(adapter);
1551
1552         /* Clear any pending interrupts. */
1553         er32(EICR);
1554         igbvf_irq_enable(adapter);
1555
1556         /* start the watchdog */
1557         hw->mac.get_link_status = 1;
1558         mod_timer(&adapter->watchdog_timer, jiffies + 1);
1559
1560         return 0;
1561 }
1562
1563 void igbvf_down(struct igbvf_adapter *adapter)
1564 {
1565         struct net_device *netdev = adapter->netdev;
1566         struct e1000_hw *hw = &adapter->hw;
1567         u32 rxdctl, txdctl;
1568
1569         /* signal that we're down so the interrupt handler does not
1570          * reschedule our watchdog timer
1571          */
1572         set_bit(__IGBVF_DOWN, &adapter->state);
1573
1574         /* disable receives in the hardware */
1575         rxdctl = er32(RXDCTL(0));
1576         ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
1577
1578         netif_carrier_off(netdev);
1579         netif_stop_queue(netdev);
1580
1581         /* disable transmits in the hardware */
1582         txdctl = er32(TXDCTL(0));
1583         ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
1584
1585         /* flush both disables and wait for them to finish */
1586         e1e_flush();
1587         msleep(10);
1588
1589         napi_disable(&adapter->rx_ring->napi);
1590
1591         igbvf_irq_disable(adapter);
1592
1593         del_timer_sync(&adapter->watchdog_timer);
1594
1595         /* record the stats before reset */
1596         igbvf_update_stats(adapter);
1597
1598         adapter->link_speed = 0;
1599         adapter->link_duplex = 0;
1600
1601         igbvf_reset(adapter);
1602         igbvf_clean_tx_ring(adapter->tx_ring);
1603         igbvf_clean_rx_ring(adapter->rx_ring);
1604 }
1605
1606 void igbvf_reinit_locked(struct igbvf_adapter *adapter)
1607 {
1608         might_sleep();
1609         while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
1610                 usleep_range(1000, 2000);
1611         igbvf_down(adapter);
1612         igbvf_up(adapter);
1613         clear_bit(__IGBVF_RESETTING, &adapter->state);
1614 }
1615
1616 /**
1617  * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
1618  * @adapter: board private structure to initialize
1619  *
1620  * igbvf_sw_init initializes the Adapter private data structure.
1621  * Fields are initialized based on PCI device information and
1622  * OS network device settings (MTU size).
1623  **/
1624 static int igbvf_sw_init(struct igbvf_adapter *adapter)
1625 {
1626         struct net_device *netdev = adapter->netdev;
1627         s32 rc;
1628
1629         adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
1630         adapter->rx_ps_hdr_size = 0;
1631         adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1632         adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1633
1634         adapter->tx_int_delay = 8;
1635         adapter->tx_abs_int_delay = 32;
1636         adapter->rx_int_delay = 0;
1637         adapter->rx_abs_int_delay = 8;
1638         adapter->requested_itr = 3;
1639         adapter->current_itr = IGBVF_START_ITR;
1640
1641         /* Set various function pointers */
1642         adapter->ei->init_ops(&adapter->hw);
1643
1644         rc = adapter->hw.mac.ops.init_params(&adapter->hw);
1645         if (rc)
1646                 return rc;
1647
1648         rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
1649         if (rc)
1650                 return rc;
1651
1652         igbvf_set_interrupt_capability(adapter);
1653
1654         if (igbvf_alloc_queues(adapter))
1655                 return -ENOMEM;
1656
1657         spin_lock_init(&adapter->tx_queue_lock);
1658
1659         /* Explicitly disable IRQ since the NIC can be in any state. */
1660         igbvf_irq_disable(adapter);
1661
1662         spin_lock_init(&adapter->stats_lock);
1663         spin_lock_init(&adapter->hw.mbx_lock);
1664
1665         set_bit(__IGBVF_DOWN, &adapter->state);
1666         return 0;
1667 }
1668
1669 static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
1670 {
1671         struct e1000_hw *hw = &adapter->hw;
1672
1673         adapter->stats.last_gprc = er32(VFGPRC);
1674         adapter->stats.last_gorc = er32(VFGORC);
1675         adapter->stats.last_gptc = er32(VFGPTC);
1676         adapter->stats.last_gotc = er32(VFGOTC);
1677         adapter->stats.last_mprc = er32(VFMPRC);
1678         adapter->stats.last_gotlbc = er32(VFGOTLBC);
1679         adapter->stats.last_gptlbc = er32(VFGPTLBC);
1680         adapter->stats.last_gorlbc = er32(VFGORLBC);
1681         adapter->stats.last_gprlbc = er32(VFGPRLBC);
1682
1683         adapter->stats.base_gprc = er32(VFGPRC);
1684         adapter->stats.base_gorc = er32(VFGORC);
1685         adapter->stats.base_gptc = er32(VFGPTC);
1686         adapter->stats.base_gotc = er32(VFGOTC);
1687         adapter->stats.base_mprc = er32(VFMPRC);
1688         adapter->stats.base_gotlbc = er32(VFGOTLBC);
1689         adapter->stats.base_gptlbc = er32(VFGPTLBC);
1690         adapter->stats.base_gorlbc = er32(VFGORLBC);
1691         adapter->stats.base_gprlbc = er32(VFGPRLBC);
1692 }
1693
1694 /**
1695  * igbvf_open - Called when a network interface is made active
1696  * @netdev: network interface device structure
1697  *
1698  * Returns 0 on success, negative value on failure
1699  *
1700  * The open entry point is called when a network interface is made
1701  * active by the system (IFF_UP).  At this point all resources needed
1702  * for transmit and receive operations are allocated, the interrupt
1703  * handler is registered with the OS, the watchdog timer is started,
1704  * and the stack is notified that the interface is ready.
1705  **/
1706 static int igbvf_open(struct net_device *netdev)
1707 {
1708         struct igbvf_adapter *adapter = netdev_priv(netdev);
1709         struct e1000_hw *hw = &adapter->hw;
1710         int err;
1711
1712         /* disallow open during test */
1713         if (test_bit(__IGBVF_TESTING, &adapter->state))
1714                 return -EBUSY;
1715
1716         /* allocate transmit descriptors */
1717         err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
1718         if (err)
1719                 goto err_setup_tx;
1720
1721         /* allocate receive descriptors */
1722         err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
1723         if (err)
1724                 goto err_setup_rx;
1725
1726         /* before we allocate an interrupt, we must be ready to handle it.
1727          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1728          * as soon as we call request_irq, so we have to set up our
1729          * clean_rx handler before we do so.
1730          */
1731         igbvf_configure(adapter);
1732
1733         err = igbvf_request_irq(adapter);
1734         if (err)
1735                 goto err_req_irq;
1736
1737         /* From here on the code is the same as igbvf_up() */
1738         clear_bit(__IGBVF_DOWN, &adapter->state);
1739
1740         napi_enable(&adapter->rx_ring->napi);
1741
1742         /* clear any pending interrupts */
1743         er32(EICR);
1744
1745         igbvf_irq_enable(adapter);
1746
1747         /* start the watchdog */
1748         hw->mac.get_link_status = 1;
1749         mod_timer(&adapter->watchdog_timer, jiffies + 1);
1750
1751         return 0;
1752
1753 err_req_irq:
1754         igbvf_free_rx_resources(adapter->rx_ring);
1755 err_setup_rx:
1756         igbvf_free_tx_resources(adapter->tx_ring);
1757 err_setup_tx:
1758         igbvf_reset(adapter);
1759
1760         return err;
1761 }
1762
1763 /**
1764  * igbvf_close - Disables a network interface
1765  * @netdev: network interface device structure
1766  *
1767  * Returns 0, this is not allowed to fail
1768  *
1769  * The close entry point is called when an interface is de-activated
1770  * by the OS.  The hardware is still under the drivers control, but
1771  * needs to be disabled.  A global MAC reset is issued to stop the
1772  * hardware, and all transmit and receive resources are freed.
1773  **/
1774 static int igbvf_close(struct net_device *netdev)
1775 {
1776         struct igbvf_adapter *adapter = netdev_priv(netdev);
1777
1778         WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
1779         igbvf_down(adapter);
1780
1781         igbvf_free_irq(adapter);
1782
1783         igbvf_free_tx_resources(adapter->tx_ring);
1784         igbvf_free_rx_resources(adapter->rx_ring);
1785
1786         return 0;
1787 }
1788
1789 /**
1790  * igbvf_set_mac - Change the Ethernet Address of the NIC
1791  * @netdev: network interface device structure
1792  * @p: pointer to an address structure
1793  *
1794  * Returns 0 on success, negative on failure
1795  **/
1796 static int igbvf_set_mac(struct net_device *netdev, void *p)
1797 {
1798         struct igbvf_adapter *adapter = netdev_priv(netdev);
1799         struct e1000_hw *hw = &adapter->hw;
1800         struct sockaddr *addr = p;
1801
1802         if (!is_valid_ether_addr(addr->sa_data))
1803                 return -EADDRNOTAVAIL;
1804
1805         memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
1806
1807         spin_lock_bh(&hw->mbx_lock);
1808
1809         hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
1810
1811         spin_unlock_bh(&hw->mbx_lock);
1812
1813         if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
1814                 return -EADDRNOTAVAIL;
1815
1816         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1817
1818         return 0;
1819 }
1820
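/* The VF statistics registers are 32 bits wide and are not cleared on read;
 * fold each reading into the 64-bit software counter, carrying into the
 * upper 32 bits whenever the hardware value wraps past the last one seen.
 */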
1821 #define UPDATE_VF_COUNTER(reg, name) \
1822 { \
1823         u32 current_counter = er32(reg); \
1824         if (current_counter < adapter->stats.last_##name) \
1825                 adapter->stats.name += 0x100000000LL; \
1826         adapter->stats.last_##name = current_counter; \
1827         adapter->stats.name &= 0xFFFFFFFF00000000LL; \
1828         adapter->stats.name |= current_counter; \
1829 }
1830
1831 /**
1832  * igbvf_update_stats - Update the board statistics counters
1833  * @adapter: board private structure
1834  **/
1835 void igbvf_update_stats(struct igbvf_adapter *adapter)
1836 {
1837         struct e1000_hw *hw = &adapter->hw;
1838         struct pci_dev *pdev = adapter->pdev;
1839
1840         /* Prevent stats update while adapter is being reset, link is down
1841          * or if the pci connection is down.
1842          */
1843         if (adapter->link_speed == 0)
1844                 return;
1845
1846         if (test_bit(__IGBVF_RESETTING, &adapter->state))
1847                 return;
1848
1849         if (pci_channel_offline(pdev))
1850                 return;
1851
1852         UPDATE_VF_COUNTER(VFGPRC, gprc);
1853         UPDATE_VF_COUNTER(VFGORC, gorc);
1854         UPDATE_VF_COUNTER(VFGPTC, gptc);
1855         UPDATE_VF_COUNTER(VFGOTC, gotc);
1856         UPDATE_VF_COUNTER(VFMPRC, mprc);
1857         UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
1858         UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
1859         UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
1860         UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);
1861
1862         /* Fill out the OS statistics structure */
1863         adapter->netdev->stats.multicast = adapter->stats.mprc;
1864 }
1865
1866 static void igbvf_print_link_info(struct igbvf_adapter *adapter)
1867 {
1868         dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
1869                  adapter->link_speed,
1870                  adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
1871 }
1872
1873 static bool igbvf_has_link(struct igbvf_adapter *adapter)
1874 {
1875         struct e1000_hw *hw = &adapter->hw;
1876         s32 ret_val = E1000_SUCCESS;
1877         bool link_active;
1878
1879         /* If interface is down, stay link down */
1880         if (test_bit(__IGBVF_DOWN, &adapter->state))
1881                 return false;
1882
1883         spin_lock_bh(&hw->mbx_lock);
1884
1885         ret_val = hw->mac.ops.check_for_link(hw);
1886
1887         spin_unlock_bh(&hw->mbx_lock);
1888
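        /* check_for_link() clears get_link_status once link is confirmed */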
1889         link_active = !hw->mac.get_link_status;
1890
1891         /* if check for link returns error we will need to reset */
1892         if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
1893                 schedule_work(&adapter->reset_task);
1894
1895         return link_active;
1896 }
1897
1898 /**
1899  * igbvf_watchdog - Timer Call-back
1900  * @t: timer list pointer containing private struct
1901  **/
1902 static void igbvf_watchdog(struct timer_list *t)
1903 {
1904         struct igbvf_adapter *adapter = from_timer(adapter, t, watchdog_timer);
1905
1906         /* Do the rest outside of interrupt context */
1907         schedule_work(&adapter->watchdog_task);
1908 }
1909
1910 static void igbvf_watchdog_task(struct work_struct *work)
1911 {
1912         struct igbvf_adapter *adapter = container_of(work,
1913                                                      struct igbvf_adapter,
1914                                                      watchdog_task);
1915         struct net_device *netdev = adapter->netdev;
1916         struct e1000_mac_info *mac = &adapter->hw.mac;
1917         struct igbvf_ring *tx_ring = adapter->tx_ring;
1918         struct e1000_hw *hw = &adapter->hw;
1919         u32 link;
1920         int tx_pending = 0;
1921
1922         link = igbvf_has_link(adapter);
1923
1924         if (link) {
1925                 if (!netif_carrier_ok(netdev)) {
1926                         mac->ops.get_link_up_info(&adapter->hw,
1927                                                   &adapter->link_speed,
1928                                                   &adapter->link_duplex);
1929                         igbvf_print_link_info(adapter);
1930
1931                         netif_carrier_on(netdev);
1932                         netif_wake_queue(netdev);
1933                 }
1934         } else {
1935                 if (netif_carrier_ok(netdev)) {
1936                         adapter->link_speed = 0;
1937                         adapter->link_duplex = 0;
1938                         dev_info(&adapter->pdev->dev, "Link is Down\n");
1939                         netif_carrier_off(netdev);
1940                         netif_stop_queue(netdev);
1941                 }
1942         }
1943
1944         if (netif_carrier_ok(netdev)) {
1945                 igbvf_update_stats(adapter);
1946         } else {
1947                 tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
1948                               tx_ring->count);
1949                 if (tx_pending) {
1950                         /* We've lost link, so the controller stops DMA,
1951                          * but we've got queued Tx work that's never going
1952                          * to get done, so reset controller to flush Tx.
1953                          * (Do the reset outside of interrupt context).
1954                          */
1955                         adapter->tx_timeout_count++;
1956                         schedule_work(&adapter->reset_task);
1957                 }
1958         }
1959
1960         /* Cause software interrupt to ensure Rx ring is cleaned */
1961         ew32(EICS, adapter->rx_ring->eims_value);
1962
1963         /* Reset the timer */
1964         if (!test_bit(__IGBVF_DOWN, &adapter->state))
1965                 mod_timer(&adapter->watchdog_timer,
1966                           round_jiffies(jiffies + (2 * HZ)));
1967 }
1968
1969 #define IGBVF_TX_FLAGS_CSUM             0x00000001
1970 #define IGBVF_TX_FLAGS_VLAN             0x00000002
1971 #define IGBVF_TX_FLAGS_TSO              0x00000004
1972 #define IGBVF_TX_FLAGS_IPV4             0x00000008
1973 #define IGBVF_TX_FLAGS_VLAN_MASK        0xffff0000
1974 #define IGBVF_TX_FLAGS_VLAN_SHIFT       16
1975
1976 static void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens,
1977                               u32 type_tucmd, u32 mss_l4len_idx)
1978 {
1979         struct e1000_adv_tx_context_desc *context_desc;
1980         struct igbvf_buffer *buffer_info;
1981         u16 i = tx_ring->next_to_use;
1982
1983         context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
1984         buffer_info = &tx_ring->buffer_info[i];
1985
1986         i++;
1987         tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1988
1989         /* set bits to identify this as an advanced context descriptor */
1990         type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
1991
1992         context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
1993         context_desc->seqnum_seed       = 0;
1994         context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
1995         context_desc->mss_l4len_idx     = cpu_to_le32(mss_l4len_idx);
1996
1997         buffer_info->time_stamp = jiffies;
1998         buffer_info->dma = 0;
1999 }
2000
2001 static int igbvf_tso(struct igbvf_ring *tx_ring,
2002                      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2003 {
2004         u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
2005         union {
2006                 struct iphdr *v4;
2007                 struct ipv6hdr *v6;
2008                 unsigned char *hdr;
2009         } ip;
2010         union {
2011                 struct tcphdr *tcp;
2012                 unsigned char *hdr;
2013         } l4;
2014         u32 paylen, l4_offset;
2015         int err;
2016
2017         if (skb->ip_summed != CHECKSUM_PARTIAL)
2018                 return 0;
2019
2020         if (!skb_is_gso(skb))
2021                 return 0;
2022
2023         err = skb_cow_head(skb, 0);
2024         if (err < 0)
2025                 return err;
2026
2027         ip.hdr = skb_network_header(skb);
2028         l4.hdr = skb_checksum_start(skb);
2029
2030         /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2031         type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
2032
2033         /* initialize outer IP header fields */
2034         if (ip.v4->version == 4) {
2035                 unsigned char *csum_start = skb_checksum_start(skb);
2036                 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
2037
2038                 /* IP header will have to cancel out any data that
2039                  * is not a part of the outer IP header
2040                  */
2041                 ip.v4->check = csum_fold(csum_partial(trans_start,
2042                                                       csum_start - trans_start,
2043                                                       0));
2044                 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
2045
2046                 ip.v4->tot_len = 0;
2047         } else {
2048                 ip.v6->payload_len = 0;
2049         }
2050
2051         /* determine offset of inner transport header */
2052         l4_offset = l4.hdr - skb->data;
2053
2054         /* compute length of segmentation header */
2055         *hdr_len = (l4.tcp->doff * 4) + l4_offset;
2056
2057         /* remove payload length from inner checksum */
2058         paylen = skb->len - l4_offset;
2059         csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
2060
2061         /* MSS L4LEN IDX */
2062         mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
2063         mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
2064
2065         /* VLAN MACLEN IPLEN */
2066         vlan_macip_lens = l4.hdr - ip.hdr;
2067         vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
2068         vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK;
2069
2070         igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
2071
2072         return 1;
2073 }
2074
2075 static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb,
2076                           u32 tx_flags, __be16 protocol)
2077 {
2078         u32 vlan_macip_lens = 0;
2079         u32 type_tucmd = 0;
2080
2081         if (skb->ip_summed != CHECKSUM_PARTIAL) {
2082 csum_failed:
2083                 if (!(tx_flags & IGBVF_TX_FLAGS_VLAN))
2084                         return false;
2085                 goto no_csum;
2086         }
2087
2088         switch (skb->csum_offset) {
2089         case offsetof(struct tcphdr, check):
2090                 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
2091                 fallthrough;
2092         case offsetof(struct udphdr, check):
2093                 break;
2094         case offsetof(struct sctphdr, checksum):
2095                 /* validate that this is actually an SCTP request */
2096                 if (skb_csum_is_sctp(skb)) {
2097                         type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
2098                         break;
2099                 }
2100                 fallthrough;
2101         default:
2102                 skb_checksum_help(skb);
2103                 goto csum_failed;
2104         }
2105
2106         vlan_macip_lens = skb_checksum_start_offset(skb) -
2107                           skb_network_offset(skb);
2108 no_csum:
2109         vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
2110         vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK;
2111
2112         igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
2113         return true;
2114 }
2115
2116 static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
2117 {
2118         struct igbvf_adapter *adapter = netdev_priv(netdev);
2119
2120         /* if there are enough descriptors then we don't need to worry */
2121         if (igbvf_desc_unused(adapter->tx_ring) >= size)
2122                 return 0;
2123
2124         netif_stop_queue(netdev);
2125
2126         /* Herbert's original patch had:
2127          *  smp_mb__after_netif_stop_queue();
2128          * but since that doesn't exist yet, just open code it.
2129          */
2130         smp_mb();
2131
2132         /* We need to check again just in case room has been made available */
2133         if (igbvf_desc_unused(adapter->tx_ring) < size)
2134                 return -EBUSY;
2135
2136         netif_wake_queue(netdev);
2137
2138         ++adapter->restart_queue;
2139         return 0;
2140 }
2141
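/* each Tx data descriptor handles strictly less than 2^16 bytes of data */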
2142 #define IGBVF_MAX_TXD_PWR       16
2143 #define IGBVF_MAX_DATA_PER_TXD  (1u << IGBVF_MAX_TXD_PWR)
2144
2145 static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2146                                    struct igbvf_ring *tx_ring,
2147                                    struct sk_buff *skb)
2148 {
2149         struct igbvf_buffer *buffer_info;
2150         struct pci_dev *pdev = adapter->pdev;
2151         unsigned int len = skb_headlen(skb);
2152         unsigned int count = 0, i;
2153         unsigned int f;
2154
2155         i = tx_ring->next_to_use;
2156
2157         buffer_info = &tx_ring->buffer_info[i];
2158         BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
2159         buffer_info->length = len;
2160         /* set time_stamp *before* dma to help avoid a possible race */
2161         buffer_info->time_stamp = jiffies;
2162         buffer_info->mapped_as_page = false;
2163         buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
2164                                           DMA_TO_DEVICE);
2165         if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2166                 goto dma_error;
2167
2168         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
2169                 const skb_frag_t *frag;
2170
2171                 count++;
2172                 i++;
2173                 if (i == tx_ring->count)
2174                         i = 0;
2175
2176                 frag = &skb_shinfo(skb)->frags[f];
2177                 len = skb_frag_size(frag);
2178
2179                 buffer_info = &tx_ring->buffer_info[i];
2180                 BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
2181                 buffer_info->length = len;
2182                 buffer_info->time_stamp = jiffies;
2183                 buffer_info->mapped_as_page = true;
2184                 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
2185                                                     DMA_TO_DEVICE);
2186                 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2187                         goto dma_error;
2188         }
2189
2190         tx_ring->buffer_info[i].skb = skb;
2191
2192         return ++count;
2193
2194 dma_error:
2195         dev_err(&pdev->dev, "TX DMA map failed\n");
2196
2197         /* clear timestamp and dma mappings for failed buffer_info mapping */
2198         buffer_info->dma = 0;
2199         buffer_info->time_stamp = 0;
2200         buffer_info->length = 0;
2201         buffer_info->mapped_as_page = false;
2202         if (count)
2203                 count--;
2204
2205         /* clear timestamp and dma mappings for remaining portion of packet */
2206         while (count--) {
2207                 if (i == 0)
2208                         i += tx_ring->count;
2209                 i--;
2210                 buffer_info = &tx_ring->buffer_info[i];
2211                 igbvf_put_txbuf(adapter, buffer_info);
2212         }
2213
2214         return 0;
2215 }
2216
2217 static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
2218                                       struct igbvf_ring *tx_ring,
2219                                       int tx_flags, int count,
2220                                       unsigned int first, u32 paylen,
2221                                       u8 hdr_len)
2222 {
2223         union e1000_adv_tx_desc *tx_desc = NULL;
2224         struct igbvf_buffer *buffer_info;
2225         u32 olinfo_status = 0, cmd_type_len;
2226         unsigned int i;
2227
2228         cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
2229                         E1000_ADVTXD_DCMD_DEXT);
2230
2231         if (tx_flags & IGBVF_TX_FLAGS_VLAN)
2232                 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
2233
2234         if (tx_flags & IGBVF_TX_FLAGS_TSO) {
2235                 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
2236
2237                 /* insert tcp checksum */
2238                 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2239
2240                 /* insert ip checksum */
2241                 if (tx_flags & IGBVF_TX_FLAGS_IPV4)
2242                         olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
2243
2244         } else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
2245                 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2246         }
2247
2248         olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
2249
2250         i = tx_ring->next_to_use;
2251         while (count--) {
2252                 buffer_info = &tx_ring->buffer_info[i];
2253                 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
2254                 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
2255                 tx_desc->read.cmd_type_len =
2256                          cpu_to_le32(cmd_type_len | buffer_info->length);
2257                 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2258                 i++;
2259                 if (i == tx_ring->count)
2260                         i = 0;
2261         }
2262
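        /* mark the final descriptor with the per-packet command bits
         * (e.g. end-of-packet/report-status) kept in adapter->txd_cmd
         */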
2263         tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
2264         /* Force memory writes to complete before letting h/w
2265          * know there are new descriptors to fetch.  (Only
2266          * applicable for weak-ordered memory model archs,
2267          * such as IA-64).
2268          */
2269         wmb();
2270
2271         tx_ring->buffer_info[first].next_to_watch = tx_desc;
2272         tx_ring->next_to_use = i;
2273         writel(i, adapter->hw.hw_addr + tx_ring->tail);
2274 }
2275
2276 static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2277                                              struct net_device *netdev,
2278                                              struct igbvf_ring *tx_ring)
2279 {
2280         struct igbvf_adapter *adapter = netdev_priv(netdev);
2281         unsigned int first, tx_flags = 0;
2282         u8 hdr_len = 0;
2283         int count = 0;
2284         int tso = 0;
2285         __be16 protocol = vlan_get_protocol(skb);
2286
2287         if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2288                 dev_kfree_skb_any(skb);
2289                 return NETDEV_TX_OK;
2290         }
2291
2292         if (skb->len <= 0) {
2293                 dev_kfree_skb_any(skb);
2294                 return NETDEV_TX_OK;
2295         }
2296
2297         /* need: nr_frags + 4 descriptors free:
2298          *       + 2 desc gap to keep tail from touching head,
2299          *       + 1 desc for skb->data,
2300          *       + 1 desc for context descriptor;
2301          * otherwise try again next time
2302          */
2303         if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
2304                 /* this is a hard error */
2305                 return NETDEV_TX_BUSY;
2306         }
2307
2308         if (skb_vlan_tag_present(skb)) {
2309                 tx_flags |= IGBVF_TX_FLAGS_VLAN;
2310                 tx_flags |= (skb_vlan_tag_get(skb) <<
2311                              IGBVF_TX_FLAGS_VLAN_SHIFT);
2312         }
2313
2314         if (protocol == htons(ETH_P_IP))
2315                 tx_flags |= IGBVF_TX_FLAGS_IPV4;
2316
2317         first = tx_ring->next_to_use;
2318
2319         tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len);
2320         if (unlikely(tso < 0)) {
2321                 dev_kfree_skb_any(skb);
2322                 return NETDEV_TX_OK;
2323         }
2324
2325         if (tso)
2326                 tx_flags |= IGBVF_TX_FLAGS_TSO;
2327         else if (igbvf_tx_csum(tx_ring, skb, tx_flags, protocol) &&
2328                  (skb->ip_summed == CHECKSUM_PARTIAL))
2329                 tx_flags |= IGBVF_TX_FLAGS_CSUM;
2330
2331         /* count reflects descriptors mapped, if 0 then mapping error
2332          * has occurred and we need to rewind the descriptor queue
2333          */
2334         count = igbvf_tx_map_adv(adapter, tx_ring, skb);
2335
2336         if (count) {
2337                 igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
2338                                    first, skb->len, hdr_len);
2339                 /* Make sure there is space in the ring for the next send. */
2340                 igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
2341         } else {
2342                 dev_kfree_skb_any(skb);
2343                 tx_ring->buffer_info[first].time_stamp = 0;
2344                 tx_ring->next_to_use = first;
2345         }
2346
2347         return NETDEV_TX_OK;
2348 }
2349
2350 static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
2351                                     struct net_device *netdev)
2352 {
2353         struct igbvf_adapter *adapter = netdev_priv(netdev);
2354         struct igbvf_ring *tx_ring;
2355
2356         if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2357                 dev_kfree_skb_any(skb);
2358                 return NETDEV_TX_OK;
2359         }
2360
2361         tx_ring = &adapter->tx_ring[0];
2362
2363         return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
2364 }
2365
2366 /**
2367  * igbvf_tx_timeout - Respond to a Tx Hang
2368  * @netdev: network interface device structure
2369  * @txqueue: queue timing out (unused)
2370  **/
2371 static void igbvf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
2372 {
2373         struct igbvf_adapter *adapter = netdev_priv(netdev);
2374
2375         /* Do the reset outside of interrupt context */
2376         adapter->tx_timeout_count++;
2377         schedule_work(&adapter->reset_task);
2378 }
2379
2380 static void igbvf_reset_task(struct work_struct *work)
2381 {
2382         struct igbvf_adapter *adapter;
2383
2384         adapter = container_of(work, struct igbvf_adapter, reset_task);
2385
2386         igbvf_reinit_locked(adapter);
2387 }
2388
2389 /**
2390  * igbvf_change_mtu - Change the Maximum Transfer Unit
2391  * @netdev: network interface device structure
2392  * @new_mtu: new value for maximum frame size
2393  *
2394  * Returns 0 on success, negative on failure
2395  **/
2396 static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
2397 {
2398         struct igbvf_adapter *adapter = netdev_priv(netdev);
2399         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2400
2401         while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
2402                 usleep_range(1000, 2000);
2403         /* igbvf_down has a dependency on max_frame_size */
2404         adapter->max_frame_size = max_frame;
2405         if (netif_running(netdev))
2406                 igbvf_down(adapter);
2407
2408         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
2409          * means we reserve 2 more, this pushes us to allocate from the next
2410          * larger slab size.
2411          * i.e. RXBUFFER_2048 --> size-4096 slab
2412          * However with the new *_jumbo_rx* routines, jumbo receives will use
2413          * fragmented skbs
2414          */
2415
2416         if (max_frame <= 1024)
2417                 adapter->rx_buffer_len = 1024;
2418         else if (max_frame <= 2048)
2419                 adapter->rx_buffer_len = 2048;
2420         else
2421 #if (PAGE_SIZE / 2) > 16384
2422                 adapter->rx_buffer_len = 16384;
2423 #else
2424                 adapter->rx_buffer_len = PAGE_SIZE / 2;
2425 #endif
2426
2427         /* adjust allocation if LPE protects us, and we aren't using SBP */
2428         if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
2429             (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
2430                 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
2431                                          ETH_FCS_LEN;
2432
2433         netdev_dbg(netdev, "changing MTU from %d to %d\n",
2434                    netdev->mtu, new_mtu);
2435         netdev->mtu = new_mtu;
2436
2437         if (netif_running(netdev))
2438                 igbvf_up(adapter);
2439         else
2440                 igbvf_reset(adapter);
2441
2442         clear_bit(__IGBVF_RESETTING, &adapter->state);
2443
2444         return 0;
2445 }
2446
2447 static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2448 {
2449         switch (cmd) {
2450         default:
2451                 return -EOPNOTSUPP;
2452         }
2453 }
2454
2455 static int igbvf_suspend(struct device *dev_d)
2456 {
2457         struct net_device *netdev = dev_get_drvdata(dev_d);
2458         struct igbvf_adapter *adapter = netdev_priv(netdev);
2459
2460         netif_device_detach(netdev);
2461
2462         if (netif_running(netdev)) {
2463                 WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
2464                 igbvf_down(adapter);
2465                 igbvf_free_irq(adapter);
2466         }
2467
2468         return 0;
2469 }
2470
2471 static int __maybe_unused igbvf_resume(struct device *dev_d)
2472 {
2473         struct pci_dev *pdev = to_pci_dev(dev_d);
2474         struct net_device *netdev = pci_get_drvdata(pdev);
2475         struct igbvf_adapter *adapter = netdev_priv(netdev);
2476         int err;
2477
2478         pci_set_master(pdev);
2479
2480         if (netif_running(netdev)) {
2481                 err = igbvf_request_irq(adapter);
2482                 if (err)
2483                         return err;
2484         }
2485
2486         igbvf_reset(adapter);
2487
2488         if (netif_running(netdev))
2489                 igbvf_up(adapter);
2490
2491         netif_device_attach(netdev);
2492
2493         return 0;
2494 }
2495
2496 static void igbvf_shutdown(struct pci_dev *pdev)
2497 {
2498         igbvf_suspend(&pdev->dev);
2499 }
2500
2501 #ifdef CONFIG_NET_POLL_CONTROLLER
2502 /* Polling 'interrupt' - used by things like netconsole to send skbs
2503  * without having to re-enable interrupts. It's not called while
2504  * the interrupt routine is executing.
2505  */
2506 static void igbvf_netpoll(struct net_device *netdev)
2507 {
2508         struct igbvf_adapter *adapter = netdev_priv(netdev);
2509
2510         disable_irq(adapter->pdev->irq);
2511
2512         igbvf_clean_tx_irq(adapter->tx_ring);
2513
2514         enable_irq(adapter->pdev->irq);
2515 }
2516 #endif
2517
2518 /**
2519  * igbvf_io_error_detected - called when PCI error is detected
2520  * @pdev: Pointer to PCI device
2521  * @state: The current pci connection state
2522  *
2523  * This function is called after a PCI bus error affecting
2524  * this device has been detected.
2525  */
2526 static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
2527                                                 pci_channel_state_t state)
2528 {
2529         struct net_device *netdev = pci_get_drvdata(pdev);
2530         struct igbvf_adapter *adapter = netdev_priv(netdev);
2531
2532         netif_device_detach(netdev);
2533
2534         if (state == pci_channel_io_perm_failure)
2535                 return PCI_ERS_RESULT_DISCONNECT;
2536
2537         if (netif_running(netdev))
2538                 igbvf_down(adapter);
2539         pci_disable_device(pdev);
2540
2541         /* Request a slot reset. */
2542         return PCI_ERS_RESULT_NEED_RESET;
2543 }
2544
2545 /**
2546  * igbvf_io_slot_reset - called after the pci bus has been reset.
2547  * @pdev: Pointer to PCI device
2548  *
2549  * Restart the card from scratch, as if from a cold boot. Implementation
2550  * resembles the first half of the igbvf_resume routine.
2551  */
2552 static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
2553 {
2554         struct net_device *netdev = pci_get_drvdata(pdev);
2555         struct igbvf_adapter *adapter = netdev_priv(netdev);
2556
2557         if (pci_enable_device_mem(pdev)) {
2558                 dev_err(&pdev->dev,
2559                         "Cannot re-enable PCI device after reset.\n");
2560                 return PCI_ERS_RESULT_DISCONNECT;
2561         }
2562         pci_set_master(pdev);
2563
2564         igbvf_reset(adapter);
2565
2566         return PCI_ERS_RESULT_RECOVERED;
2567 }
2568
2569 /**
2570  * igbvf_io_resume - called when traffic can start flowing again.
2571  * @pdev: Pointer to PCI device
2572  *
2573  * This callback is called when the error recovery driver tells us that
2574  * it's OK to resume normal operation. Implementation resembles the
2575  * second half of the igbvf_resume routine.
2576  */
2577 static void igbvf_io_resume(struct pci_dev *pdev)
2578 {
2579         struct net_device *netdev = pci_get_drvdata(pdev);
2580         struct igbvf_adapter *adapter = netdev_priv(netdev);
2581
2582         if (netif_running(netdev)) {
2583                 if (igbvf_up(adapter)) {
2584                         dev_err(&pdev->dev,
2585                                 "can't bring device back up after reset\n");
2586                         return;
2587                 }
2588         }
2589
2590         netif_device_attach(netdev);
2591 }
2592
2593 static void igbvf_print_device_info(struct igbvf_adapter *adapter)
2594 {
2595         struct e1000_hw *hw = &adapter->hw;
2596         struct net_device *netdev = adapter->netdev;
2597         struct pci_dev *pdev = adapter->pdev;
2598
2599         if (hw->mac.type == e1000_vfadapt_i350)
2600                 dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n");
2601         else
2602                 dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
2603         dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
2604 }
2605
2606 static int igbvf_set_features(struct net_device *netdev,
2607                               netdev_features_t features)
2608 {
2609         struct igbvf_adapter *adapter = netdev_priv(netdev);
2610
2611         if (features & NETIF_F_RXCSUM)
2612                 adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
2613         else
2614                 adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;
2615
2616         return 0;
2617 }
2618
2619 #define IGBVF_MAX_MAC_HDR_LEN           127
2620 #define IGBVF_MAX_NETWORK_HDR_LEN       511
2621
2622 static netdev_features_t
2623 igbvf_features_check(struct sk_buff *skb, struct net_device *dev,
2624                      netdev_features_t features)
2625 {
2626         unsigned int network_hdr_len, mac_hdr_len;
2627
2628         /* Make certain the headers can be described by a context descriptor */
2629         mac_hdr_len = skb_network_header(skb) - skb->data;
2630         if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN))
2631                 return features & ~(NETIF_F_HW_CSUM |
2632                                     NETIF_F_SCTP_CRC |
2633                                     NETIF_F_HW_VLAN_CTAG_TX |
2634                                     NETIF_F_TSO |
2635                                     NETIF_F_TSO6);
2636
2637         network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2638         if (unlikely(network_hdr_len > IGBVF_MAX_NETWORK_HDR_LEN))
2639                 return features & ~(NETIF_F_HW_CSUM |
2640                                     NETIF_F_SCTP_CRC |
2641                                     NETIF_F_TSO |
2642                                     NETIF_F_TSO6);
2643
2644         /* We can only support IPV4 TSO in tunnels if we can mangle the
2645          * inner IP ID field, so strip TSO if MANGLEID is not supported.
2646          */
2647         if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2648                 features &= ~NETIF_F_TSO;
2649
2650         return features;
2651 }
2652
2653 static const struct net_device_ops igbvf_netdev_ops = {
2654         .ndo_open               = igbvf_open,
2655         .ndo_stop               = igbvf_close,
2656         .ndo_start_xmit         = igbvf_xmit_frame,
2657         .ndo_set_rx_mode        = igbvf_set_rx_mode,
2658         .ndo_set_mac_address    = igbvf_set_mac,
2659         .ndo_change_mtu         = igbvf_change_mtu,
2660         .ndo_do_ioctl           = igbvf_ioctl,
2661         .ndo_tx_timeout         = igbvf_tx_timeout,
2662         .ndo_vlan_rx_add_vid    = igbvf_vlan_rx_add_vid,
2663         .ndo_vlan_rx_kill_vid   = igbvf_vlan_rx_kill_vid,
2664 #ifdef CONFIG_NET_POLL_CONTROLLER
2665         .ndo_poll_controller    = igbvf_netpoll,
2666 #endif
2667         .ndo_set_features       = igbvf_set_features,
2668         .ndo_features_check     = igbvf_features_check,
2669 };
2670
2671 /**
2672  * igbvf_probe - Device Initialization Routine
2673  * @pdev: PCI device information struct
2674  * @ent: entry in igbvf_pci_tbl
2675  *
2676  * Returns 0 on success, negative on failure
2677  *
2678  * igbvf_probe initializes an adapter identified by a pci_dev structure.
2679  * The OS initialization, configuring of the adapter private structure,
2680  * and a hardware reset occur.
2681  **/
2682 static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2683 {
2684         struct net_device *netdev;
2685         struct igbvf_adapter *adapter;
2686         struct e1000_hw *hw;
2687         const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
2688
2689         static int cards_found;
2690         int err, pci_using_dac;
2691
2692         err = pci_enable_device_mem(pdev);
2693         if (err)
2694                 return err;
2695
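        /* prefer a 64-bit DMA mask, falling back to 32-bit if the
         * platform cannot provide it
         */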
2696         pci_using_dac = 0;
2697         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2698         if (!err) {
2699                 pci_using_dac = 1;
2700         } else {
2701                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2702                 if (err) {
2703                         dev_err(&pdev->dev,
2704                                 "No usable DMA configuration, aborting\n");
2705                         goto err_dma;
2706                 }
2707         }
2708
2709         err = pci_request_regions(pdev, igbvf_driver_name);
2710         if (err)
2711                 goto err_pci_reg;
2712
2713         pci_set_master(pdev);
2714
2715         err = -ENOMEM;
2716         netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
2717         if (!netdev)
2718                 goto err_alloc_etherdev;
2719
2720         SET_NETDEV_DEV(netdev, &pdev->dev);
2721
2722         pci_set_drvdata(pdev, netdev);
2723         adapter = netdev_priv(netdev);
2724         hw = &adapter->hw;
2725         adapter->netdev = netdev;
2726         adapter->pdev = pdev;
2727         adapter->ei = ei;
2728         adapter->pba = ei->pba;
2729         adapter->flags = ei->flags;
2730         adapter->hw.back = adapter;
2731         adapter->hw.mac.type = ei->mac;
2732         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2733
2734         /* PCI config space info */
2735
2736         hw->vendor_id = pdev->vendor;
2737         hw->device_id = pdev->device;
2738         hw->subsystem_vendor_id = pdev->subsystem_vendor;
2739         hw->subsystem_device_id = pdev->subsystem_device;
2740         hw->revision_id = pdev->revision;
2741
2742         err = -EIO;
2743         adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
2744                                       pci_resource_len(pdev, 0));
2745
2746         if (!adapter->hw.hw_addr)
2747                 goto err_ioremap;
2748
2749         if (ei->get_variants) {
2750                 err = ei->get_variants(adapter);
2751                 if (err)
2752                         goto err_get_variants;
2753         }
2754
2755         /* setup adapter struct */
2756         err = igbvf_sw_init(adapter);
2757         if (err)
2758                 goto err_sw_init;
2759
2760         /* construct the net_device struct */
2761         netdev->netdev_ops = &igbvf_netdev_ops;
2762
2763         igbvf_set_ethtool_ops(netdev);
2764         netdev->watchdog_timeo = 5 * HZ;
2765         strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2766
2767         adapter->bd_number = cards_found++;
2768
2769         netdev->hw_features = NETIF_F_SG |
2770                               NETIF_F_TSO |
2771                               NETIF_F_TSO6 |
2772                               NETIF_F_RXCSUM |
2773                               NETIF_F_HW_CSUM |
2774                               NETIF_F_SCTP_CRC;
2775
2776 #define IGBVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
2777                                     NETIF_F_GSO_GRE_CSUM | \
2778                                     NETIF_F_GSO_IPXIP4 | \
2779                                     NETIF_F_GSO_IPXIP6 | \
2780                                     NETIF_F_GSO_UDP_TUNNEL | \
2781                                     NETIF_F_GSO_UDP_TUNNEL_CSUM)
2782
2783         netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES;
2784         netdev->hw_features |= NETIF_F_GSO_PARTIAL |
2785                                IGBVF_GSO_PARTIAL_FEATURES;
2786
2787         netdev->features = netdev->hw_features;
2788
2789         if (pci_using_dac)
2790                 netdev->features |= NETIF_F_HIGHDMA;
2791
2792         netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
2793         netdev->mpls_features |= NETIF_F_HW_CSUM;
2794         netdev->hw_enc_features |= netdev->vlan_features;
2795
2796         /* set this bit last since it cannot be part of vlan_features */
2797         netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
2798                             NETIF_F_HW_VLAN_CTAG_RX |
2799                             NETIF_F_HW_VLAN_CTAG_TX;
2800
2801         /* MTU range: 68 - 9216 */
2802         netdev->min_mtu = ETH_MIN_MTU;
2803         netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
2804
2805         spin_lock_bh(&hw->mbx_lock);
2806
2807         /* reset the controller to put the device in a known good state */
2808         err = hw->mac.ops.reset_hw(hw);
2809         if (err) {
2810                 dev_info(&pdev->dev,
2811                          "PF still in reset state. Is the PF interface up?\n");
2812         } else {
2813                 err = hw->mac.ops.read_mac_addr(hw);
2814                 if (err)
2815                         dev_info(&pdev->dev, "Error reading MAC address.\n");
2816                 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2817                         dev_info(&pdev->dev,
2818                                  "MAC address not assigned by administrator.\n");
2819                 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
2820                        netdev->addr_len);
2821         }
2822
2823         spin_unlock_bh(&hw->mbx_lock);
2824
2825         if (!is_valid_ether_addr(netdev->dev_addr)) {
2826                 dev_info(&pdev->dev, "Assigning random MAC address.\n");
2827                 eth_hw_addr_random(netdev);
2828                 memcpy(adapter->hw.mac.addr, netdev->dev_addr,
2829                        netdev->addr_len);
2830         }
2831
2832         timer_setup(&adapter->watchdog_timer, igbvf_watchdog, 0);
2833
2834         INIT_WORK(&adapter->reset_task, igbvf_reset_task);
2835         INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);
2836
2837         /* ring size defaults */
2838         adapter->rx_ring->count = 1024;
2839         adapter->tx_ring->count = 1024;
2840
2841         /* reset the hardware with the new settings */
2842         igbvf_reset(adapter);
2843
2844         /* set hardware-specific flags */
2845         if (adapter->hw.mac.type == e1000_vfadapt_i350)
2846                 adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP;
2847
2848         strcpy(netdev->name, "eth%d");
2849         err = register_netdev(netdev);
2850         if (err)
2851                 goto err_hw_init;
2852
2853         /* tell the stack to leave us alone until igbvf_open() is called */
2854         netif_carrier_off(netdev);
2855         netif_stop_queue(netdev);
2856
2857         igbvf_print_device_info(adapter);
2858
2859         igbvf_initialize_last_counter_stats(adapter);
2860
2861         return 0;
2862
2863 err_hw_init:
2864         kfree(adapter->tx_ring);
2865         kfree(adapter->rx_ring);
2866 err_sw_init:
2867         igbvf_reset_interrupt_capability(adapter);
2868 err_get_variants:
2869         iounmap(adapter->hw.hw_addr);
2870 err_ioremap:
2871         free_netdev(netdev);
2872 err_alloc_etherdev:
2873         pci_release_regions(pdev);
2874 err_pci_reg:
2875 err_dma:
2876         pci_disable_device(pdev);
2877         return err;
2878 }
2879
2880 /**
2881  * igbvf_remove - Device Removal Routine
2882  * @pdev: PCI device information struct
2883  *
2884  * igbvf_remove is called by the PCI subsystem to alert the driver
2885  * that it should release a PCI device.  This could be caused by a
2886  * Hot-Plug event, or because the driver is going to be removed from
2887  * memory.
2888  **/
2889 static void igbvf_remove(struct pci_dev *pdev)
2890 {
2891         struct net_device *netdev = pci_get_drvdata(pdev);
2892         struct igbvf_adapter *adapter = netdev_priv(netdev);
2893         struct e1000_hw *hw = &adapter->hw;
2894
2895         /* The watchdog timer may be rescheduled, so explicitly
2896          * prevent it from being rescheduled.
2897          */
2898         set_bit(__IGBVF_DOWN, &adapter->state);
2899         del_timer_sync(&adapter->watchdog_timer);
2900
2901         cancel_work_sync(&adapter->reset_task);
2902         cancel_work_sync(&adapter->watchdog_task);
2903
2904         unregister_netdev(netdev);
2905
2906         igbvf_reset_interrupt_capability(adapter);
2907
2908         /* it is important to delete the NAPI struct prior to freeing the
2909          * Rx ring so that you do not end up with null pointer refs
2910          */
2911         netif_napi_del(&adapter->rx_ring->napi);
2912         kfree(adapter->tx_ring);
2913         kfree(adapter->rx_ring);
2914
2915         iounmap(hw->hw_addr);
2916         if (hw->flash_address)
2917                 iounmap(hw->flash_address);
2918         pci_release_regions(pdev);
2919
2920         free_netdev(netdev);
2921
2922         pci_disable_device(pdev);
2923 }
2924
2925 /* PCI Error Recovery (ERS) */
2926 static const struct pci_error_handlers igbvf_err_handler = {
2927         .error_detected = igbvf_io_error_detected,
2928         .slot_reset = igbvf_io_slot_reset,
2929         .resume = igbvf_io_resume,
2930 };
2931
2932 static const struct pci_device_id igbvf_pci_tbl[] = {
2933         { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
2934         { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
2935         { } /* terminate list */
2936 };
2937 MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
2938
2939 static SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume);
2940
2941 /* PCI Device API Driver */
2942 static struct pci_driver igbvf_driver = {
2943         .name           = igbvf_driver_name,
2944         .id_table       = igbvf_pci_tbl,
2945         .probe          = igbvf_probe,
2946         .remove         = igbvf_remove,
2947         .driver.pm      = &igbvf_pm_ops,
2948         .shutdown       = igbvf_shutdown,
2949         .err_handler    = &igbvf_err_handler
2950 };
2951
2952 /**
2953  * igbvf_init_module - Driver Registration Routine
2954  *
2955  * igbvf_init_module is the first routine called when the driver is
2956  * loaded. All it does is register with the PCI subsystem.
2957  **/
2958 static int __init igbvf_init_module(void)
2959 {
2960         int ret;
2961
2962         pr_info("%s\n", igbvf_driver_string);
2963         pr_info("%s\n", igbvf_copyright);
2964
2965         ret = pci_register_driver(&igbvf_driver);
2966
2967         return ret;
2968 }
2969 module_init(igbvf_init_module);
2970
2971 /**
2972  * igbvf_exit_module - Driver Exit Cleanup Routine
2973  *
2974  * igbvf_exit_module is called just before the driver is removed
2975  * from memory.
2976  **/
2977 static void __exit igbvf_exit_module(void)
2978 {
2979         pci_unregister_driver(&igbvf_driver);
2980 }
2981 module_exit(igbvf_exit_module);
2982
2983 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
2984 MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
2985 MODULE_LICENSE("GPL v2");
2986
2987 /* netdev.c */