drivers/net/ethernet/google/gve/gve_main.c
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2019 Google, Inc.
5  */
6
7 #include <linux/cpumask.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/sched.h>
13 #include <linux/timer.h>
14 #include <linux/workqueue.h>
15 #include <net/sch_generic.h>
16 #include "gve.h"
17 #include "gve_adminq.h"
18 #include "gve_register.h"
19
20 #define GVE_DEFAULT_RX_COPYBREAK        (256)
21
22 #define DEFAULT_MSG_LEVEL       (NETIF_MSG_DRV | NETIF_MSG_LINK)
23 #define GVE_VERSION             "1.0.0"
24 #define GVE_VERSION_PREFIX      "GVE-"
25
26 const char gve_version_str[] = GVE_VERSION;
27 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
28
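/* ndo_get_stats64 handler: sum per-ring RX/TX packet and byte counters,
 * re-reading each ring under its u64_stats seqcount so the 64-bit values
 * are read consistently on 32-bit hosts.
 */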
29 static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
30 {
31         struct gve_priv *priv = netdev_priv(dev);
32         unsigned int start;
33         int ring;
34
35         if (priv->rx) {
36                 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
37                         do {
38                                 start =
39                                   u64_stats_fetch_begin(&priv->rx[ring].statss);
40                                 s->rx_packets += priv->rx[ring].rpackets;
41                                 s->rx_bytes += priv->rx[ring].rbytes;
42                         } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
43                                                        start));
44                 }
45         }
46         if (priv->tx) {
47                 for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
48                         do {
49                                 start =
50                                   u64_stats_fetch_begin(&priv->tx[ring].statss);
51                                 s->tx_packets += priv->tx[ring].pkt_done;
52                                 s->tx_bytes += priv->tx[ring].bytes_done;
53                         } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
54                                                        start));
55                 }
56         }
57 }
58
59 static int gve_alloc_counter_array(struct gve_priv *priv)
60 {
61         priv->counter_array =
62                 dma_alloc_coherent(&priv->pdev->dev,
63                                    priv->num_event_counters *
64                                    sizeof(*priv->counter_array),
65                                    &priv->counter_array_bus, GFP_KERNEL);
66         if (!priv->counter_array)
67                 return -ENOMEM;
68
69         return 0;
70 }
71
72 static void gve_free_counter_array(struct gve_priv *priv)
73 {
74         dma_free_coherent(&priv->pdev->dev,
75                           priv->num_event_counters *
76                           sizeof(*priv->counter_array),
77                           priv->counter_array, priv->counter_array_bus);
78         priv->counter_array = NULL;
79 }
80
81 /* NIC requests to report stats */
82 static void gve_stats_report_task(struct work_struct *work)
83 {
84         struct gve_priv *priv = container_of(work, struct gve_priv,
85                                              stats_report_task);
86         if (gve_get_do_report_stats(priv)) {
87                 gve_handle_report_stats(priv);
88                 gve_clear_do_report_stats(priv);
89         }
90 }
91
92 static void gve_stats_report_schedule(struct gve_priv *priv)
93 {
94         if (!gve_get_probe_in_progress(priv) &&
95             !gve_get_reset_in_progress(priv)) {
96                 gve_set_do_report_stats(priv);
97                 queue_work(priv->gve_wq, &priv->stats_report_task);
98         }
99 }
100
101 static void gve_stats_report_timer(struct timer_list *t)
102 {
103         struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
104
105         mod_timer(&priv->stats_report_timer,
106                   round_jiffies(jiffies +
107                   msecs_to_jiffies(priv->stats_report_timer_period)));
108         gve_stats_report_schedule(priv);
109 }
110
111 static int gve_alloc_stats_report(struct gve_priv *priv)
112 {
113         int tx_stats_num, rx_stats_num;
114
115         tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
116                        priv->tx_cfg.num_queues;
117         rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
118                        priv->rx_cfg.num_queues;
119         priv->stats_report_len = struct_size(priv->stats_report, stats,
120                                              tx_stats_num + rx_stats_num);
121         priv->stats_report =
122                 dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
123                                    &priv->stats_report_bus, GFP_KERNEL);
124         if (!priv->stats_report)
125                 return -ENOMEM;
126         /* Set up timer for the report-stats task */
127         timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
128         priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
129         return 0;
130 }
131
132 static void gve_free_stats_report(struct gve_priv *priv)
133 {
134         del_timer_sync(&priv->stats_report_timer);
135         dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
136                           priv->stats_report, priv->stats_report_bus);
137         priv->stats_report = NULL;
138 }
139
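/* Management interrupt: the device signalled a status change (reset
 * request, stats request, link change); defer handling to the service task.
 */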
140 static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
141 {
142         struct gve_priv *priv = arg;
143
144         queue_work(priv->gve_wq, &priv->service_task);
145         return IRQ_HANDLED;
146 }
147
148 static irqreturn_t gve_intr(int irq, void *arg)
149 {
150         struct gve_notify_block *block = arg;
151         struct gve_priv *priv = block->priv;
152
153         iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
154         napi_schedule_irqoff(&block->napi);
155         return IRQ_HANDLED;
156 }
157
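/* NAPI poll: service TX and RX work for this notification block. If work
 * remains, keep polling; otherwise complete NAPI, unmask the interrupt and
 * re-check for completions that raced with the unmask.
 */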
158 static int gve_napi_poll(struct napi_struct *napi, int budget)
159 {
160         struct gve_notify_block *block;
161         __be32 __iomem *irq_doorbell;
162         bool reschedule = false;
163         struct gve_priv *priv;
164
165         block = container_of(napi, struct gve_notify_block, napi);
166         priv = block->priv;
167
168         if (block->tx)
169                 reschedule |= gve_tx_poll(block, budget);
170         if (block->rx)
171                 reschedule |= gve_rx_poll(block, budget);
172
173         if (reschedule)
174                 return budget;
175
176         napi_complete(napi);
177         irq_doorbell = gve_irq_doorbell(priv, block);
178         iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
179
180         /* Double check we have no extra work.
181          * Ensure unmask synchronizes with checking for work.
182          */
183         mb();
184         if (block->tx)
185                 reschedule |= gve_tx_poll(block, -1);
186         if (block->rx)
187                 reschedule |= gve_rx_poll(block, -1);
188         if (reschedule && napi_reschedule(napi))
189                 iowrite32be(GVE_IRQ_MASK, irq_doorbell);
190
191         return 0;
192 }
193
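/* Allocate one MSI-X vector per notification block plus one management
 * vector. If fewer vectors are granted, shrink the TX/RX queue limits to
 * fit, then request the IRQs and spread their affinity hints across the
 * active CPUs.
 */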
194 static int gve_alloc_notify_blocks(struct gve_priv *priv)
195 {
196         int num_vecs_requested = priv->num_ntfy_blks + 1;
197         char *name = priv->dev->name;
198         unsigned int active_cpus;
199         int vecs_enabled;
200         int i, j;
201         int err;
202
203         priv->msix_vectors = kvzalloc(num_vecs_requested *
204                                       sizeof(*priv->msix_vectors), GFP_KERNEL);
205         if (!priv->msix_vectors)
206                 return -ENOMEM;
207         for (i = 0; i < num_vecs_requested; i++)
208                 priv->msix_vectors[i].entry = i;
209         vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
210                                              GVE_MIN_MSIX, num_vecs_requested);
211         if (vecs_enabled < 0) {
212                 dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
213                         GVE_MIN_MSIX, vecs_enabled);
214                 err = vecs_enabled;
215                 goto abort_with_msix_vectors;
216         }
217         if (vecs_enabled != num_vecs_requested) {
218                 int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
219                 int vecs_per_type = new_num_ntfy_blks / 2;
220                 int vecs_left = new_num_ntfy_blks % 2;
221
222                 priv->num_ntfy_blks = new_num_ntfy_blks;
223                 priv->mgmt_msix_idx = priv->num_ntfy_blks;
224                 priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
225                                                 vecs_per_type);
226                 priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
227                                                 vecs_per_type + vecs_left);
228                 dev_err(&priv->pdev->dev,
229                         "Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
230                         vecs_enabled, priv->tx_cfg.max_queues,
231                         priv->rx_cfg.max_queues);
232                 if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
233                         priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
234                 if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
235                         priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
236         }
237         /* Half the notification blocks go to TX and half to RX */
238         active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
239
240         /* Setup Management Vector  - the last vector */
241         snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
242                  name);
243         err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
244                           gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
245         if (err) {
246                 dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
247                 goto abort_with_msix_enabled;
248         }
249         priv->ntfy_blocks =
250                 dma_alloc_coherent(&priv->pdev->dev,
251                                    priv->num_ntfy_blks *
252                                    sizeof(*priv->ntfy_blocks),
253                                    &priv->ntfy_block_bus, GFP_KERNEL);
254         if (!priv->ntfy_blocks) {
255                 err = -ENOMEM;
256                 goto abort_with_mgmt_vector;
257         }
258         /* Setup the other blocks - the first n-1 vectors */
259         for (i = 0; i < priv->num_ntfy_blks; i++) {
260                 struct gve_notify_block *block = &priv->ntfy_blocks[i];
261                 int msix_idx = i;
262
263                 snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
264                          name, i);
265                 block->priv = priv;
266                 err = request_irq(priv->msix_vectors[msix_idx].vector,
267                                   gve_intr, 0, block->name, block);
268                 if (err) {
269                         dev_err(&priv->pdev->dev,
270                                 "Failed to receive msix vector %d\n", i);
271                         goto abort_with_some_ntfy_blocks;
272                 }
273                 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
274                                       get_cpu_mask(i % active_cpus));
275         }
276         return 0;
277 abort_with_some_ntfy_blocks:
278         for (j = 0; j < i; j++) {
279                 struct gve_notify_block *block = &priv->ntfy_blocks[j];
280                 int msix_idx = j;
281
282                 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
283                                       NULL);
284                 free_irq(priv->msix_vectors[msix_idx].vector, block);
285         }
286         dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
287                           sizeof(*priv->ntfy_blocks),
288                           priv->ntfy_blocks, priv->ntfy_block_bus);
289         priv->ntfy_blocks = NULL;
290 abort_with_mgmt_vector:
291         free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
292 abort_with_msix_enabled:
293         pci_disable_msix(priv->pdev);
294 abort_with_msix_vectors:
295         kvfree(priv->msix_vectors);
296         priv->msix_vectors = NULL;
297         return err;
298 }
299
300 static void gve_free_notify_blocks(struct gve_priv *priv)
301 {
302         int i;
303
304         if (priv->msix_vectors) {
305                 /* Free the irqs */
306                 for (i = 0; i < priv->num_ntfy_blks; i++) {
307                         struct gve_notify_block *block = &priv->ntfy_blocks[i];
308                         int msix_idx = i;
309
310                         irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
311                                               NULL);
312                         free_irq(priv->msix_vectors[msix_idx].vector, block);
313                 }
314                 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
315         }
316         dma_free_coherent(&priv->pdev->dev,
317                           priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
318                           priv->ntfy_blocks, priv->ntfy_block_bus);
319         priv->ntfy_blocks = NULL;
320         pci_disable_msix(priv->pdev);
321         kvfree(priv->msix_vectors);
322         priv->msix_vectors = NULL;
323 }
324
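/* Allocate the event counter array, notification blocks and stats report,
 * then hand their DMA addresses to the device over the admin queue.
 */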
325 static int gve_setup_device_resources(struct gve_priv *priv)
326 {
327         int err;
328
329         err = gve_alloc_counter_array(priv);
330         if (err)
331                 return err;
332         err = gve_alloc_notify_blocks(priv);
333         if (err)
334                 goto abort_with_counter;
335         err = gve_alloc_stats_report(priv);
336         if (err)
337                 goto abort_with_ntfy_blocks;
338         err = gve_adminq_configure_device_resources(priv,
339                                                     priv->counter_array_bus,
340                                                     priv->num_event_counters,
341                                                     priv->ntfy_block_bus,
342                                                     priv->num_ntfy_blks);
343         if (unlikely(err)) {
344                 dev_err(&priv->pdev->dev,
345                         "could not setup device_resources: err=%d\n", err);
346                 err = -ENXIO;
347                 goto abort_with_stats_report;
348         }
349         err = gve_adminq_report_stats(priv, priv->stats_report_len,
350                                       priv->stats_report_bus,
351                                       GVE_STATS_REPORT_TIMER_PERIOD);
352         if (err)
353                 dev_err(&priv->pdev->dev,
354                         "Failed to report stats: err=%d\n", err);
355         gve_set_device_resources_ok(priv);
356         return 0;
357 abort_with_stats_report:
358         gve_free_stats_report(priv);
359 abort_with_ntfy_blocks:
360         gve_free_notify_blocks(priv);
361 abort_with_counter:
362         gve_free_counter_array(priv);
363         return err;
364 }
365
366 static void gve_trigger_reset(struct gve_priv *priv);
367
368 static void gve_teardown_device_resources(struct gve_priv *priv)
369 {
370         int err;
371
372         /* Tell device its resources are being freed */
373         if (gve_get_device_resources_ok(priv)) {
374                 /* detach the stats report */
375                 err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
376                 if (err) {
377                         dev_err(&priv->pdev->dev,
378                                 "Failed to detach stats report: err=%d\n", err);
379                         gve_trigger_reset(priv);
380                 }
381                 err = gve_adminq_deconfigure_device_resources(priv);
382                 if (err) {
383                         dev_err(&priv->pdev->dev,
384                                 "Could not deconfigure device resources: err=%d\n",
385                                 err);
386                         gve_trigger_reset(priv);
387                 }
388         }
389         gve_free_counter_array(priv);
390         gve_free_notify_blocks(priv);
391         gve_free_stats_report(priv);
392         gve_clear_device_resources_ok(priv);
393 }
394
395 static void gve_add_napi(struct gve_priv *priv, int ntfy_idx)
396 {
397         struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
398
399         netif_napi_add(priv->dev, &block->napi, gve_napi_poll,
400                        NAPI_POLL_WEIGHT);
401 }
402
403 static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
404 {
405         struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
406
407         netif_napi_del(&block->napi);
408 }
409
410 static int gve_register_qpls(struct gve_priv *priv)
411 {
412         int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
413         int err;
414         int i;
415
416         for (i = 0; i < num_qpls; i++) {
417                 err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
418                 if (err) {
419                         netif_err(priv, drv, priv->dev,
420                                   "failed to register queue page list %d\n",
421                                   priv->qpls[i].id);
422                         /* This failure will trigger a reset - no need to clean
423                          * up
424                          */
425                         return err;
426                 }
427         }
428         return 0;
429 }
430
431 static int gve_unregister_qpls(struct gve_priv *priv)
432 {
433         int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
434         int err;
435         int i;
436
437         for (i = 0; i < num_qpls; i++) {
438                 err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
439                 /* This failure will trigger a reset - no need to clean up */
440                 if (err) {
441                         netif_err(priv, drv, priv->dev,
442                                   "Failed to unregister queue page list %d\n",
443                                   priv->qpls[i].id);
444                         return err;
445                 }
446         }
447         return 0;
448 }
449
450 static int gve_create_rings(struct gve_priv *priv)
451 {
452         int err;
453         int i;
454
455         err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
456         if (err) {
457                 netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
458                           priv->tx_cfg.num_queues);
459                 /* This failure will trigger a reset - no need to clean
460                  * up
461                  */
462                 return err;
463         }
464         netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
465                   priv->tx_cfg.num_queues);
466
467         err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
468         if (err) {
469                 netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
470                           priv->rx_cfg.num_queues);
471                 /* This failure will trigger a reset - no need to clean
472                  * up
473                  */
474                 return err;
475         }
476         netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
477                   priv->rx_cfg.num_queues);
478
479         /* Rx data ring has been prefilled with packet buffers at queue
480          * allocation time.
481          * Write the doorbell to provide descriptor slots and packet buffers
482          * to the NIC.
483          */
484         for (i = 0; i < priv->rx_cfg.num_queues; i++)
485                 gve_rx_write_doorbell(priv, &priv->rx[i]);
486
487         return 0;
488 }
489
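/* Allocate the TX and RX ring arrays and their per-queue state, initialize
 * each ring's u64_stats seqcount and attach NAPI to its notification block.
 */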
490 static int gve_alloc_rings(struct gve_priv *priv)
491 {
492         int ntfy_idx;
493         int err;
494         int i;
495
496         /* Setup tx rings */
497         priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
498                             GFP_KERNEL);
499         if (!priv->tx)
500                 return -ENOMEM;
501         err = gve_tx_alloc_rings(priv);
502         if (err)
503                 goto free_tx;
504         /* Setup rx rings */
505         priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
506                             GFP_KERNEL);
507         if (!priv->rx) {
508                 err = -ENOMEM;
509                 goto free_tx_queue;
510         }
511         err = gve_rx_alloc_rings(priv);
512         if (err)
513                 goto free_rx;
514         /* Add tx napi & init sync stats */
515         for (i = 0; i < priv->tx_cfg.num_queues; i++) {
516                 u64_stats_init(&priv->tx[i].statss);
517                 ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
518                 gve_add_napi(priv, ntfy_idx);
519         }
520         /* Add rx napi & init sync stats */
521         for (i = 0; i < priv->rx_cfg.num_queues; i++) {
522                 u64_stats_init(&priv->rx[i].statss);
523                 ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
524                 gve_add_napi(priv, ntfy_idx);
525         }
526
527         return 0;
528
529 free_rx:
530         kvfree(priv->rx);
531         priv->rx = NULL;
532 free_tx_queue:
533         gve_tx_free_rings(priv);
534 free_tx:
535         kvfree(priv->tx);
536         priv->tx = NULL;
537         return err;
538 }
539
540 static int gve_destroy_rings(struct gve_priv *priv)
541 {
542         int err;
543
544         err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
545         if (err) {
546                 netif_err(priv, drv, priv->dev,
547                           "failed to destroy tx queues\n");
548                 /* This failure will trigger a reset - no need to clean up */
549                 return err;
550         }
551         netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
552         err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
553         if (err) {
554                 netif_err(priv, drv, priv->dev,
555                           "failed to destroy rx queues\n");
556                 /* This failure will trigger a reset - no need to clean up */
557                 return err;
558         }
559         netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
560         return 0;
561 }
562
563 static void gve_free_rings(struct gve_priv *priv)
564 {
565         int ntfy_idx;
566         int i;
567
568         if (priv->tx) {
569                 for (i = 0; i < priv->tx_cfg.num_queues; i++) {
570                         ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
571                         gve_remove_napi(priv, ntfy_idx);
572                 }
573                 gve_tx_free_rings(priv);
574                 kvfree(priv->tx);
575                 priv->tx = NULL;
576         }
577         if (priv->rx) {
578                 for (i = 0; i < priv->rx_cfg.num_queues; i++) {
579                         ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
580                         gve_remove_napi(priv, ntfy_idx);
581                 }
582                 gve_rx_free_rings(priv);
583                 kvfree(priv->rx);
584                 priv->rx = NULL;
585         }
586 }
587
588 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
589                    struct page **page, dma_addr_t *dma,
590                    enum dma_data_direction dir)
591 {
592         *page = alloc_page(GFP_KERNEL);
593         if (!*page) {
594                 priv->page_alloc_fail++;
595                 return -ENOMEM;
596         }
597         *dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
598         if (dma_mapping_error(dev, *dma)) {
599                 priv->dma_mapping_error++;
600                 put_page(*page);
601                 return -ENOMEM;
602         }
603         return 0;
604 }
605
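/* Allocate and DMA-map "pages" pages for queue page list "id", respecting
 * the device's limit on registered pages. The caller cleans up on failure.
 */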
606 static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
607                                      int pages)
608 {
609         struct gve_queue_page_list *qpl = &priv->qpls[id];
610         int err;
611         int i;
612
613         if (pages + priv->num_registered_pages > priv->max_registered_pages) {
614                 netif_err(priv, drv, priv->dev,
615                           "Reached max number of registered pages %llu > %llu\n",
616                           pages + priv->num_registered_pages,
617                           priv->max_registered_pages);
618                 return -EINVAL;
619         }
620
621         qpl->id = id;
622         qpl->num_entries = 0;
623         qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
624         /* caller handles clean up */
625         if (!qpl->pages)
626                 return -ENOMEM;
627         qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),
628                                    GFP_KERNEL);
629         /* caller handles clean up */
630         if (!qpl->page_buses)
631                 return -ENOMEM;
632
633         for (i = 0; i < pages; i++) {
634                 err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
635                                      &qpl->page_buses[i],
636                                      gve_qpl_dma_dir(priv, id));
637                 /* caller handles clean up */
638                 if (err)
639                         return -ENOMEM;
640                 qpl->num_entries++;
641         }
642         priv->num_registered_pages += pages;
643
644         return 0;
645 }
646
647 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
648                    enum dma_data_direction dir)
649 {
650         if (!dma_mapping_error(dev, dma))
651                 dma_unmap_page(dev, dma, PAGE_SIZE, dir);
652         if (page)
653                 put_page(page);
654 }
655
656 static void gve_free_queue_page_list(struct gve_priv *priv,
657                                      int id)
658 {
659         struct gve_queue_page_list *qpl = &priv->qpls[id];
660         int i;
661
662         if (!qpl->pages)
663                 return;
664         if (!qpl->page_buses)
665                 goto free_pages;
666
667         for (i = 0; i < qpl->num_entries; i++)
668                 gve_free_page(&priv->pdev->dev, qpl->pages[i],
669                               qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
670
671         kvfree(qpl->page_buses);
672 free_pages:
673         kvfree(qpl->pages);
674         priv->num_registered_pages -= qpl->num_entries;
675 }
676
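/* Allocate the TX queue page lists, then the RX ones, plus the bitmap used
 * to assign QPL ids to queues. Not needed when raw addressing is in use.
 */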
677 static int gve_alloc_qpls(struct gve_priv *priv)
678 {
679         int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
680         int i, j;
681         int err;
682
683         /* Raw addressing means no QPLs */
684         if (priv->raw_addressing)
685                 return 0;
686
687         priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
688         if (!priv->qpls)
689                 return -ENOMEM;
690
691         for (i = 0; i < gve_num_tx_qpls(priv); i++) {
692                 err = gve_alloc_queue_page_list(priv, i,
693                                                 priv->tx_pages_per_qpl);
694                 if (err)
695                         goto free_qpls;
696         }
697         for (; i < num_qpls; i++) {
698                 err = gve_alloc_queue_page_list(priv, i,
699                                                 priv->rx_data_slot_cnt);
700                 if (err)
701                         goto free_qpls;
702         }
703
704         priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
705                                      sizeof(unsigned long) * BITS_PER_BYTE;
706         priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) *
707                                             sizeof(unsigned long), GFP_KERNEL);
708         if (!priv->qpl_cfg.qpl_id_map) {
709                 err = -ENOMEM;
710                 goto free_qpls;
711         }
712
713         return 0;
714
715 free_qpls:
716         for (j = 0; j <= i; j++)
717                 gve_free_queue_page_list(priv, j);
718         kvfree(priv->qpls);
719         return err;
720 }
721
722 static void gve_free_qpls(struct gve_priv *priv)
723 {
724         int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
725         int i;
726
727         /* Raw addressing means no QPLs */
728         if (priv->raw_addressing)
729                 return;
730
731         kvfree(priv->qpl_cfg.qpl_id_map);
732
733         for (i = 0; i < num_qpls; i++)
734                 gve_free_queue_page_list(priv, i);
735
736         kvfree(priv->qpls);
737 }
738
739 /* Use this to schedule a reset when the device is capable of continuing
740  * to handle other requests in its current state. If it is not, do a reset
741  * in thread instead.
742  */
743 void gve_schedule_reset(struct gve_priv *priv)
744 {
745         gve_set_do_reset(priv);
746         queue_work(priv->gve_wq, &priv->service_task);
747 }
748
749 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
750 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
751 static void gve_turndown(struct gve_priv *priv);
752 static void gve_turnup(struct gve_priv *priv);
753
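/* ndo_open: allocate QPLs and rings, register them with the device and
 * create the queues, then enable NAPI and the TX queues. Failures that
 * leave the device in an unknown state fall back to a reset.
 */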
754 static int gve_open(struct net_device *dev)
755 {
756         struct gve_priv *priv = netdev_priv(dev);
757         int err;
758
759         err = gve_alloc_qpls(priv);
760         if (err)
761                 return err;
762         err = gve_alloc_rings(priv);
763         if (err)
764                 goto free_qpls;
765
766         err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
767         if (err)
768                 goto free_rings;
769         err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
770         if (err)
771                 goto free_rings;
772
773         err = gve_register_qpls(priv);
774         if (err)
775                 goto reset;
776         err = gve_create_rings(priv);
777         if (err)
778                 goto reset;
779         gve_set_device_rings_ok(priv);
780
781         if (gve_get_report_stats(priv))
782                 mod_timer(&priv->stats_report_timer,
783                           round_jiffies(jiffies +
784                                 msecs_to_jiffies(priv->stats_report_timer_period)));
785
786         gve_turnup(priv);
787         queue_work(priv->gve_wq, &priv->service_task);
788         priv->interface_up_cnt++;
789         return 0;
790
791 free_rings:
792         gve_free_rings(priv);
793 free_qpls:
794         gve_free_qpls(priv);
795         return err;
796
797 reset:
798         /* This must have been called from a reset due to the rtnl lock
799          * so just return at this point.
800          */
801         if (gve_get_reset_in_progress(priv))
802                 return err;
803         /* Otherwise reset before returning */
804         gve_reset_and_teardown(priv, true);
805         /* if this fails there is nothing we can do so just ignore the return */
806         gve_reset_recovery(priv, false);
807         /* return the original error */
808         return err;
809 }
810
811 static int gve_close(struct net_device *dev)
812 {
813         struct gve_priv *priv = netdev_priv(dev);
814         int err;
815
816         netif_carrier_off(dev);
817         if (gve_get_device_rings_ok(priv)) {
818                 gve_turndown(priv);
819                 err = gve_destroy_rings(priv);
820                 if (err)
821                         goto err;
822                 err = gve_unregister_qpls(priv);
823                 if (err)
824                         goto err;
825                 gve_clear_device_rings_ok(priv);
826         }
827         del_timer_sync(&priv->stats_report_timer);
828
829         gve_free_rings(priv);
830         gve_free_qpls(priv);
831         priv->interface_down_cnt++;
832         return 0;
833
834 err:
835         /* This must have been called from a reset due to the rtnl lock
836          * so just return at this point.
837          */
838         if (gve_get_reset_in_progress(priv))
839                 return err;
840         /* Otherwise reset before returning */
841         gve_reset_and_teardown(priv, true);
842         return gve_reset_recovery(priv, false);
843 }
844
845 int gve_adjust_queues(struct gve_priv *priv,
846                       struct gve_queue_config new_rx_config,
847                       struct gve_queue_config new_tx_config)
848 {
849         int err;
850
851         if (netif_carrier_ok(priv->dev)) {
852                 /* To make this process as simple as possible we teardown the
853                  * device, set the new configuration, and then bring the device
854                  * up again.
855                  */
856                 err = gve_close(priv->dev);
857                 /* we have already tried to reset in close,
858                  * just fail at this point
859                  */
860                 if (err)
861                         return err;
862                 priv->tx_cfg = new_tx_config;
863                 priv->rx_cfg = new_rx_config;
864
865                 err = gve_open(priv->dev);
866                 if (err)
867                         goto err;
868
869                 return 0;
870         }
871         /* Set the config for the next up. */
872         priv->tx_cfg = new_tx_config;
873         priv->rx_cfg = new_rx_config;
874
875         return 0;
876 err:
877         netif_err(priv, drv, priv->dev,
878                   "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
879         gve_turndown(priv);
880         return err;
881 }
882
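/* Quiesce the data path: drop the carrier, disable NAPI on every
 * notification block and stop the TX queues. gve_turnup() is the inverse.
 */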
883 static void gve_turndown(struct gve_priv *priv)
884 {
885         int idx;
886
887         if (netif_carrier_ok(priv->dev))
888                 netif_carrier_off(priv->dev);
889
890         if (!gve_get_napi_enabled(priv))
891                 return;
892
893         /* Disable napi to prevent more work from coming in */
894         for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
895                 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
896                 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
897
898                 napi_disable(&block->napi);
899         }
900         for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
901                 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
902                 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
903
904                 napi_disable(&block->napi);
905         }
906
907         /* Stop tx queues */
908         netif_tx_disable(priv->dev);
909
910         gve_clear_napi_enabled(priv);
911         gve_clear_report_stats(priv);
912 }
913
914 static void gve_turnup(struct gve_priv *priv)
915 {
916         int idx;
917
918         /* Start the tx queues */
919         netif_tx_start_all_queues(priv->dev);
920
921         /* Enable napi and unmask interrupts for all queues */
922         for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
923                 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
924                 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
925
926                 napi_enable(&block->napi);
927                 iowrite32be(0, gve_irq_doorbell(priv, block));
928         }
929         for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
930                 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
931                 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
932
933                 napi_enable(&block->napi);
934                 iowrite32be(0, gve_irq_doorbell(priv, block));
935         }
936
937         gve_set_napi_enabled(priv);
938 }
939
940 static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
941 {
942         struct gve_priv *priv = netdev_priv(dev);
943
944         gve_schedule_reset(priv);
945         priv->tx_timeo_cnt++;
946 }
947
948 static const struct net_device_ops gve_netdev_ops = {
949         .ndo_start_xmit         =       gve_tx,
950         .ndo_open               =       gve_open,
951         .ndo_stop               =       gve_close,
952         .ndo_get_stats64        =       gve_get_stats,
953         .ndo_tx_timeout         =       gve_tx_timeout,
954 };
955
956 static void gve_handle_status(struct gve_priv *priv, u32 status)
957 {
958         if (GVE_DEVICE_STATUS_RESET_MASK & status) {
959                 dev_info(&priv->pdev->dev, "Device requested reset.\n");
960                 gve_set_do_reset(priv);
961         }
962         if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
963                 priv->stats_report_trigger_cnt++;
964                 gve_set_do_report_stats(priv);
965         }
966 }
967
968 static void gve_handle_reset(struct gve_priv *priv)
969 {
970         /* A service task will be scheduled at the end of probe to catch any
971          * resets that need to happen, and we don't want to reset until
972          * probe is done.
973          */
974         if (gve_get_probe_in_progress(priv))
975                 return;
976
977         if (gve_get_do_reset(priv)) {
978                 rtnl_lock();
979                 gve_reset(priv, false);
980                 rtnl_unlock();
981         }
982 }
983
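/* Bump the report's written_count and fill the DMA'd stats report with
 * per-queue TX and RX counters for the device to read.
 */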
984 void gve_handle_report_stats(struct gve_priv *priv)
985 {
986         int idx, stats_idx = 0, tx_bytes;
987         unsigned int start = 0;
988         struct stats *stats = priv->stats_report->stats;
989
990         if (!gve_get_report_stats(priv))
991                 return;
992
993         be64_add_cpu(&priv->stats_report->written_count, 1);
994         /* tx stats */
995         if (priv->tx) {
996                 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
997                         do {
998                                 start = u64_stats_fetch_begin(&priv->tx[idx].statss);
999                                 tx_bytes = priv->tx[idx].bytes_done;
1000                         } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
1001                         stats[stats_idx++] = (struct stats) {
1002                                 .stat_name = cpu_to_be32(TX_WAKE_CNT),
1003                                 .value = cpu_to_be64(priv->tx[idx].wake_queue),
1004                                 .queue_id = cpu_to_be32(idx),
1005                         };
1006                         stats[stats_idx++] = (struct stats) {
1007                                 .stat_name = cpu_to_be32(TX_STOP_CNT),
1008                                 .value = cpu_to_be64(priv->tx[idx].stop_queue),
1009                                 .queue_id = cpu_to_be32(idx),
1010                         };
1011                         stats[stats_idx++] = (struct stats) {
1012                                 .stat_name = cpu_to_be32(TX_FRAMES_SENT),
1013                                 .value = cpu_to_be64(priv->tx[idx].req),
1014                                 .queue_id = cpu_to_be32(idx),
1015                         };
1016                         stats[stats_idx++] = (struct stats) {
1017                                 .stat_name = cpu_to_be32(TX_BYTES_SENT),
1018                                 .value = cpu_to_be64(tx_bytes),
1019                                 .queue_id = cpu_to_be32(idx),
1020                         };
1021                         stats[stats_idx++] = (struct stats) {
1022                                 .stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
1023                                 .value = cpu_to_be64(priv->tx[idx].done),
1024                                 .queue_id = cpu_to_be32(idx),
1025                         };
1026                 }
1027         }
1028         /* rx stats */
1029         if (priv->rx) {
1030                 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1031                         stats[stats_idx++] = (struct stats) {
1032                                 .stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
1033                                 .value = cpu_to_be64(priv->rx[idx].desc.seqno),
1034                                 .queue_id = cpu_to_be32(idx),
1035                         };
1036                         stats[stats_idx++] = (struct stats) {
1037                                 .stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
1038                                 .value = cpu_to_be64(priv->rx[idx].fill_cnt),
1039                                 .queue_id = cpu_to_be32(idx),
1040                         };
1041                 }
1042         }
1043 }
1044
1045 static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
1046 {
1047         if (!gve_get_napi_enabled(priv))
1048                 return;
1049
1050         if (link_status == netif_carrier_ok(priv->dev))
1051                 return;
1052
1053         if (link_status) {
1054                 netdev_info(priv->dev, "Device link is up.\n");
1055                 netif_carrier_on(priv->dev);
1056         } else {
1057                 netdev_info(priv->dev, "Device link is down.\n");
1058                 netif_carrier_off(priv->dev);
1059         }
1060 }
1061
1062 /* Handle NIC status register changes, reset requests and report stats */
1063 static void gve_service_task(struct work_struct *work)
1064 {
1065         struct gve_priv *priv = container_of(work, struct gve_priv,
1066                                              service_task);
1067         u32 status = ioread32be(&priv->reg_bar0->device_status);
1068
1069         gve_handle_status(priv, status);
1070
1071         gve_handle_reset(priv);
1072         gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
1073 }
1074
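/* Bring up the admin queue and, unless this is a post-reset re-init, query
 * the device description, clamp the MTU, and size the MSI-X vectors and
 * queue counts, then set up the shared device resources.
 */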
1075 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
1076 {
1077         int num_ntfy;
1078         int err;
1079
1080         /* Set up the adminq */
1081         err = gve_adminq_alloc(&priv->pdev->dev, priv);
1082         if (err) {
1083                 dev_err(&priv->pdev->dev,
1084                         "Failed to alloc admin queue: err=%d\n", err);
1085                 return err;
1086         }
1087
1088         if (skip_describe_device)
1089                 goto setup_device;
1090
1091         priv->raw_addressing = false;
1092         /* Get the initial information we need from the device */
1093         err = gve_adminq_describe_device(priv);
1094         if (err) {
1095                 dev_err(&priv->pdev->dev,
1096                         "Could not get device information: err=%d\n", err);
1097                 goto err;
1098         }
1099         if (priv->dev->max_mtu > PAGE_SIZE) {
1100                 priv->dev->max_mtu = PAGE_SIZE;
1101                 err = gve_adminq_set_mtu(priv, priv->dev->mtu);
1102                 if (err) {
1103                         dev_err(&priv->pdev->dev, "Could not set mtu\n");
1104                         goto err;
1105                 }
1106         }
1107         priv->dev->mtu = priv->dev->max_mtu;
1108         num_ntfy = pci_msix_vec_count(priv->pdev);
1109         if (num_ntfy <= 0) {
1110                 dev_err(&priv->pdev->dev,
1111                         "could not count MSI-x vectors: err=%d\n", num_ntfy);
1112                 err = num_ntfy;
1113                 goto err;
1114         } else if (num_ntfy < GVE_MIN_MSIX) {
1115                 dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
1116                         GVE_MIN_MSIX, num_ntfy);
1117                 err = -EINVAL;
1118                 goto err;
1119         }
1120
1121         priv->num_registered_pages = 0;
1122         priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
1123         /* gvnic has one Notification Block per MSI-x vector, except for the
1124          * management vector
1125          */
1126         priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
1127         priv->mgmt_msix_idx = priv->num_ntfy_blks;
1128
1129         priv->tx_cfg.max_queues =
1130                 min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
1131         priv->rx_cfg.max_queues =
1132                 min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);
1133
1134         priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
1135         priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
1136         if (priv->default_num_queues > 0) {
1137                 priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
1138                                                 priv->tx_cfg.num_queues);
1139                 priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
1140                                                 priv->rx_cfg.num_queues);
1141         }
1142
1143         dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
1144                  priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
1145         dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
1146                  priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
1147
1148 setup_device:
1149         err = gve_setup_device_resources(priv);
1150         if (!err)
1151                 return 0;
1152 err:
1153         gve_adminq_free(&priv->pdev->dev, priv);
1154         return err;
1155 }
1156
1157 static void gve_teardown_priv_resources(struct gve_priv *priv)
1158 {
1159         gve_teardown_device_resources(priv);
1160         gve_adminq_free(&priv->pdev->dev, priv);
1161 }
1162
1163 static void gve_trigger_reset(struct gve_priv *priv)
1164 {
1165         /* Reset the device by releasing the AQ */
1166         gve_adminq_release(priv);
1167 }
1168
1169 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
1170 {
1171         gve_trigger_reset(priv);
1172         /* With the reset having already happened, close cannot fail */
1173         if (was_up)
1174                 gve_close(priv->dev);
1175         gve_teardown_priv_resources(priv);
1176 }
1177
1178 static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
1179 {
1180         int err;
1181
1182         err = gve_init_priv(priv, true);
1183         if (err)
1184                 goto err;
1185         if (was_up) {
1186                 err = gve_open(priv->dev);
1187                 if (err)
1188                         goto err;
1189         }
1190         return 0;
1191 err:
1192         dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
1193         gve_turndown(priv);
1194         return err;
1195 }
1196
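/* Full device reset: tear the device down (via a normal close when
 * attempt_teardown is set and that succeeds, otherwise by releasing the
 * admin queue), then re-initialize and re-open if the interface was up.
 */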
1197 int gve_reset(struct gve_priv *priv, bool attempt_teardown)
1198 {
1199         bool was_up = netif_carrier_ok(priv->dev);
1200         int err;
1201
1202         dev_info(&priv->pdev->dev, "Performing reset\n");
1203         gve_clear_do_reset(priv);
1204         gve_set_reset_in_progress(priv);
1205         /* If we aren't attempting to teardown normally, just go turndown and
1206          * reset right away.
1207          */
1208         if (!attempt_teardown) {
1209                 gve_turndown(priv);
1210                 gve_reset_and_teardown(priv, was_up);
1211         } else {
1212                 /* Otherwise attempt to close normally */
1213                 if (was_up) {
1214                         err = gve_close(priv->dev);
1215                         /* If that fails reset as we did above */
1216                         if (err)
1217                                 gve_reset_and_teardown(priv, was_up);
1218                 }
1219                 /* Clean up any remaining resources */
1220                 gve_teardown_priv_resources(priv);
1221         }
1222
1223         /* Set it all back up */
1224         err = gve_reset_recovery(priv, was_up);
1225         gve_clear_reset_in_progress(priv);
1226         priv->reset_cnt++;
1227         priv->interface_up_cnt = 0;
1228         priv->interface_down_cnt = 0;
1229         priv->stats_report_trigger_cnt = 0;
1230         return err;
1231 }
1232
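/* Report the driver version to the device by writing the prefix and
 * version strings, byte by byte, to the driver version register.
 */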
1233 static void gve_write_version(u8 __iomem *driver_version_register)
1234 {
1235         const char *c = gve_version_prefix;
1236
1237         while (*c) {
1238                 writeb(*c, driver_version_register);
1239                 c++;
1240         }
1241
1242         c = gve_version_str;
1243         while (*c) {
1244                 writeb(*c, driver_version_register);
1245                 c++;
1246         }
1247         writeb('\n', driver_version_register);
1248 }
1249
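/* PCI probe: enable the device, map the register and doorbell BARs, read
 * the maximum queue counts, allocate and configure the netdev, initialize
 * the private state and register the interface.
 */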
1250 static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1251 {
1252         int max_tx_queues, max_rx_queues;
1253         struct net_device *dev;
1254         __be32 __iomem *db_bar;
1255         struct gve_registers __iomem *reg_bar;
1256         struct gve_priv *priv;
1257         int err;
1258
1259         err = pci_enable_device(pdev);
1260         if (err)
1261                 return err;
1262
1263         err = pci_request_regions(pdev, "gvnic-cfg");
1264         if (err)
1265                 goto abort_with_enabled;
1266
1267         pci_set_master(pdev);
1268
1269         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1270         if (err) {
1271                 dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
1272                 goto abort_with_pci_region;
1273         }
1274
1275         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1276         if (err) {
1277                 dev_err(&pdev->dev,
1278                         "Failed to set consistent dma mask: err=%d\n", err);
1279                 goto abort_with_pci_region;
1280         }
1281
1282         reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
1283         if (!reg_bar) {
1284                 dev_err(&pdev->dev, "Failed to map pci bar!\n");
1285                 err = -ENOMEM;
1286                 goto abort_with_pci_region;
1287         }
1288
1289         db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
1290         if (!db_bar) {
1291                 dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
1292                 err = -ENOMEM;
1293                 goto abort_with_reg_bar;
1294         }
1295
1296         gve_write_version(&reg_bar->driver_version);
1297         /* Get max queues to alloc etherdev */
1298         max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
1299         max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
1300         /* Alloc and setup the netdev and priv */
1301         dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
1302         if (!dev) {
1303                 dev_err(&pdev->dev, "could not allocate netdev\n");
                     err = -ENOMEM;
1304                 goto abort_with_db_bar;
1305         }
1306         SET_NETDEV_DEV(dev, &pdev->dev);
1307         pci_set_drvdata(pdev, dev);
1308         dev->ethtool_ops = &gve_ethtool_ops;
1309         dev->netdev_ops = &gve_netdev_ops;
1310         /* advertise features */
1311         dev->hw_features = NETIF_F_HIGHDMA;
1312         dev->hw_features |= NETIF_F_SG;
1313         dev->hw_features |= NETIF_F_HW_CSUM;
1314         dev->hw_features |= NETIF_F_TSO;
1315         dev->hw_features |= NETIF_F_TSO6;
1316         dev->hw_features |= NETIF_F_TSO_ECN;
1317         dev->hw_features |= NETIF_F_RXCSUM;
1318         dev->hw_features |= NETIF_F_RXHASH;
1319         dev->features = dev->hw_features;
1320         dev->watchdog_timeo = 5 * HZ;
1321         dev->min_mtu = ETH_MIN_MTU;
1322         netif_carrier_off(dev);
1323
1324         priv = netdev_priv(dev);
1325         priv->dev = dev;
1326         priv->pdev = pdev;
1327         priv->msg_enable = DEFAULT_MSG_LEVEL;
1328         priv->reg_bar0 = reg_bar;
1329         priv->db_bar2 = db_bar;
1330         priv->service_task_flags = 0x0;
1331         priv->state_flags = 0x0;
1332         priv->ethtool_flags = 0x0;
1333
1334         gve_set_probe_in_progress(priv);
1335         priv->gve_wq = alloc_ordered_workqueue("gve", 0);
1336         if (!priv->gve_wq) {
1337                 dev_err(&pdev->dev, "Could not allocate workqueue");
1338                 err = -ENOMEM;
1339                 goto abort_with_netdev;
1340         }
1341         INIT_WORK(&priv->service_task, gve_service_task);
1342         INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
1343         priv->tx_cfg.max_queues = max_tx_queues;
1344         priv->rx_cfg.max_queues = max_rx_queues;
1345
1346         err = gve_init_priv(priv, false);
1347         if (err)
1348                 goto abort_with_wq;
1349
1350         err = register_netdev(dev);
1351         if (err)
1352                 goto abort_with_wq;
1353
1354         dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
1355         gve_clear_probe_in_progress(priv);
1356         queue_work(priv->gve_wq, &priv->service_task);
1357         return 0;
1358
1359 abort_with_wq:
1360         destroy_workqueue(priv->gve_wq);
1361
1362 abort_with_netdev:
1363         free_netdev(dev);
1364
1365 abort_with_db_bar:
1366         pci_iounmap(pdev, db_bar);
1367
1368 abort_with_reg_bar:
1369         pci_iounmap(pdev, reg_bar);
1370
1371 abort_with_pci_region:
1372         pci_release_regions(pdev);
1373
1374 abort_with_enabled:
1375         pci_disable_device(pdev);
1376         return err;
1377 }
1378
1379 static void gve_remove(struct pci_dev *pdev)
1380 {
1381         struct net_device *netdev = pci_get_drvdata(pdev);
1382         struct gve_priv *priv = netdev_priv(netdev);
1383         __be32 __iomem *db_bar = priv->db_bar2;
1384         void __iomem *reg_bar = priv->reg_bar0;
1385
1386         unregister_netdev(netdev);
1387         gve_teardown_priv_resources(priv);
1388         destroy_workqueue(priv->gve_wq);
1389         free_netdev(netdev);
1390         pci_iounmap(pdev, db_bar);
1391         pci_iounmap(pdev, reg_bar);
1392         pci_release_regions(pdev);
1393         pci_disable_device(pdev);
1394 }
1395
1396 static const struct pci_device_id gve_id_table[] = {
1397         { PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
1398         { }
1399 };
1400
1401 static struct pci_driver gvnic_driver = {
1402         .name           = "gvnic",
1403         .id_table       = gve_id_table,
1404         .probe          = gve_probe,
1405         .remove         = gve_remove,
1406 };
1407
1408 module_pci_driver(gvnic_driver);
1409
1410 MODULE_DEVICE_TABLE(pci, gve_id_table);
1411 MODULE_AUTHOR("Google, Inc.");
1412 MODULE_DESCRIPTION("gVNIC Driver");
1413 MODULE_LICENSE("Dual MIT/GPL");
1414 MODULE_VERSION(GVE_VERSION);