gve: Update mgmt_msix_idx if num_ntfy changes
drivers/net/ethernet/google/gve/gve_main.c
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2019 Google, Inc.
5  */
6
7 #include <linux/cpumask.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/sched.h>
13 #include <linux/timer.h>
14 #include <linux/workqueue.h>
15 #include <net/sch_generic.h>
16 #include "gve.h"
17 #include "gve_adminq.h"
18 #include "gve_register.h"
19
20 #define GVE_DEFAULT_RX_COPYBREAK        (256)
21
22 #define DEFAULT_MSG_LEVEL       (NETIF_MSG_DRV | NETIF_MSG_LINK)
23 #define GVE_VERSION             "1.0.0"
24 #define GVE_VERSION_PREFIX      "GVE-"
25
26 const char gve_version_str[] = GVE_VERSION;
27 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
28
29 static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
30 {
31         struct gve_priv *priv = netdev_priv(dev);
32         unsigned int start;
33         int ring;
34
35         if (priv->rx) {
36                 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
37                         do {
38                                 start =
39                                   u64_stats_fetch_begin(&priv->rx[ring].statss);
40                                 s->rx_packets += priv->rx[ring].rpackets;
41                                 s->rx_bytes += priv->rx[ring].rbytes;
42                         } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
43                                                        start));
44                 }
45         }
46         if (priv->tx) {
47                 for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
48                         do {
49                                 start =
50                                   u64_stats_fetch_begin(&priv->tx[ring].statss);
51                                 s->tx_packets += priv->tx[ring].pkt_done;
52                                 s->tx_bytes += priv->tx[ring].bytes_done;
53                         } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
54                                                        start));
55                 }
56         }
57 }
58
59 static int gve_alloc_counter_array(struct gve_priv *priv)
60 {
61         priv->counter_array =
62                 dma_alloc_coherent(&priv->pdev->dev,
63                                    priv->num_event_counters *
64                                    sizeof(*priv->counter_array),
65                                    &priv->counter_array_bus, GFP_KERNEL);
66         if (!priv->counter_array)
67                 return -ENOMEM;
68
69         return 0;
70 }
71
72 static void gve_free_counter_array(struct gve_priv *priv)
73 {
74         dma_free_coherent(&priv->pdev->dev,
75                           priv->num_event_counters *
76                           sizeof(*priv->counter_array),
77                           priv->counter_array, priv->counter_array_bus);
78         priv->counter_array = NULL;
79 }
80
81 /* NIC requests to report stats */
82 static void gve_stats_report_task(struct work_struct *work)
83 {
84         struct gve_priv *priv = container_of(work, struct gve_priv,
85                                              stats_report_task);
86         if (gve_get_do_report_stats(priv)) {
87                 gve_handle_report_stats(priv);
88                 gve_clear_do_report_stats(priv);
89         }
90 }
91
92 static void gve_stats_report_schedule(struct gve_priv *priv)
93 {
94         if (!gve_get_probe_in_progress(priv) &&
95             !gve_get_reset_in_progress(priv)) {
96                 gve_set_do_report_stats(priv);
97                 queue_work(priv->gve_wq, &priv->stats_report_task);
98         }
99 }
100
101 static void gve_stats_report_timer(struct timer_list *t)
102 {
103         struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
104
105         mod_timer(&priv->stats_report_timer,
106                   round_jiffies(jiffies +
107                   msecs_to_jiffies(priv->stats_report_timer_period)));
108         gve_stats_report_schedule(priv);
109 }
110
111 static int gve_alloc_stats_report(struct gve_priv *priv)
112 {
113         int tx_stats_num, rx_stats_num;
114
115         tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
116                        priv->tx_cfg.num_queues;
117         rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
118                        priv->rx_cfg.num_queues;
119         priv->stats_report_len = struct_size(priv->stats_report, stats,
120                                              tx_stats_num + rx_stats_num);
121         priv->stats_report =
122                 dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
123                                    &priv->stats_report_bus, GFP_KERNEL);
124         if (!priv->stats_report)
125                 return -ENOMEM;
126         /* Set up timer for the report-stats task */
127         timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
128         priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
129         return 0;
130 }
131
132 static void gve_free_stats_report(struct gve_priv *priv)
133 {
134         del_timer_sync(&priv->stats_report_timer);
135         dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
136                           priv->stats_report, priv->stats_report_bus);
137         priv->stats_report = NULL;
138 }
139
140 static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
141 {
142         struct gve_priv *priv = arg;
143
144         queue_work(priv->gve_wq, &priv->service_task);
145         return IRQ_HANDLED;
146 }
147
148 static irqreturn_t gve_intr(int irq, void *arg)
149 {
150         struct gve_notify_block *block = arg;
151         struct gve_priv *priv = block->priv;
152
153         iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
154         napi_schedule_irqoff(&block->napi);
155         return IRQ_HANDLED;
156 }
157
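/* NAPI poll handler. gve_intr() masks the block's interrupt before scheduling
 * NAPI, so polling runs with the vector masked. When tx/rx polling report no
 * further work, complete NAPI and re-arm the interrupt through the irq
 * doorbell, then re-check for work that may have raced with the unmask and
 * reschedule (re-masking the vector) if anything was missed.
 */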
158 static int gve_napi_poll(struct napi_struct *napi, int budget)
159 {
160         struct gve_notify_block *block;
161         __be32 __iomem *irq_doorbell;
162         bool reschedule = false;
163         struct gve_priv *priv;
164
165         block = container_of(napi, struct gve_notify_block, napi);
166         priv = block->priv;
167
168         if (block->tx)
169                 reschedule |= gve_tx_poll(block, budget);
170         if (block->rx)
171                 reschedule |= gve_rx_poll(block, budget);
172
173         if (reschedule)
174                 return budget;
175
176         napi_complete(napi);
177         irq_doorbell = gve_irq_doorbell(priv, block);
178         iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
179
180         /* Double check we have no extra work.
181          * Ensure unmask synchronizes with checking for work.
182          */
183         dma_rmb();
184         if (block->tx)
185                 reschedule |= gve_tx_poll(block, -1);
186         if (block->rx)
187                 reschedule |= gve_rx_poll(block, -1);
188         if (reschedule && napi_reschedule(napi))
189                 iowrite32be(GVE_IRQ_MASK, irq_doorbell);
190
191         return 0;
192 }
193
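/* Allocate one MSI-X vector per notification block plus one management vector
 * (the last one, tracked by priv->mgmt_msix_idx). If fewer vectors are granted
 * than requested, shrink the notification block count, update mgmt_msix_idx
 * and cap the tx/rx max queue counts to match.
 */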
194 static int gve_alloc_notify_blocks(struct gve_priv *priv)
195 {
196         int num_vecs_requested = priv->num_ntfy_blks + 1;
197         char *name = priv->dev->name;
198         unsigned int active_cpus;
199         int vecs_enabled;
200         int i, j;
201         int err;
202
203         priv->msix_vectors = kvzalloc(num_vecs_requested *
204                                       sizeof(*priv->msix_vectors), GFP_KERNEL);
205         if (!priv->msix_vectors)
206                 return -ENOMEM;
207         for (i = 0; i < num_vecs_requested; i++)
208                 priv->msix_vectors[i].entry = i;
209         vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
210                                              GVE_MIN_MSIX, num_vecs_requested);
211         if (vecs_enabled < 0) {
212                 dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
213                         GVE_MIN_MSIX, vecs_enabled);
214                 err = vecs_enabled;
215                 goto abort_with_msix_vectors;
216         }
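        /* Fewer vectors were granted than requested: reserve the last vector
         * for management and round the remaining notification blocks down to
         * an even number split between TX and RX. For example, 7 granted
         * vectors yield 6 notification blocks (3 TX + 3 RX) plus the
         * management vector at index 6.
         */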
217         if (vecs_enabled != num_vecs_requested) {
218                 int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
219                 int vecs_per_type = new_num_ntfy_blks / 2;
220                 int vecs_left = new_num_ntfy_blks % 2;
221
222                 priv->num_ntfy_blks = new_num_ntfy_blks;
223                 priv->mgmt_msix_idx = priv->num_ntfy_blks;
224                 priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
225                                                 vecs_per_type);
226                 priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
227                                                 vecs_per_type + vecs_left);
228                 dev_err(&priv->pdev->dev,
229                         "Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
230                         vecs_enabled, priv->tx_cfg.max_queues,
231                         priv->rx_cfg.max_queues);
232                 if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
233                         priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
234                 if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
235                         priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
236         }
237         /* Half the notification blocks go to TX and half to RX */
238         active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
239
240         /* Setup Management Vector  - the last vector */
241         snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
242                  name);
243         err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
244                           gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
245         if (err) {
246                 dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
247                 goto abort_with_msix_enabled;
248         }
249         priv->ntfy_blocks =
250                 dma_alloc_coherent(&priv->pdev->dev,
251                                    priv->num_ntfy_blks *
252                                    sizeof(*priv->ntfy_blocks),
253                                    &priv->ntfy_block_bus, GFP_KERNEL);
254         if (!priv->ntfy_blocks) {
255                 err = -ENOMEM;
256                 goto abort_with_mgmt_vector;
257         }
258         /* Setup the other blocks - the first n-1 vectors */
259         for (i = 0; i < priv->num_ntfy_blks; i++) {
260                 struct gve_notify_block *block = &priv->ntfy_blocks[i];
261                 int msix_idx = i;
262
263                 snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
264                          name, i);
265                 block->priv = priv;
266                 err = request_irq(priv->msix_vectors[msix_idx].vector,
267                                   gve_intr, 0, block->name, block);
268                 if (err) {
269                         dev_err(&priv->pdev->dev,
270                                 "Failed to receive msix vector %d\n", i);
271                         goto abort_with_some_ntfy_blocks;
272                 }
273                 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
274                                       get_cpu_mask(i % active_cpus));
275         }
276         return 0;
277 abort_with_some_ntfy_blocks:
278         for (j = 0; j < i; j++) {
279                 struct gve_notify_block *block = &priv->ntfy_blocks[j];
280                 int msix_idx = j;
281
282                 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
283                                       NULL);
284                 free_irq(priv->msix_vectors[msix_idx].vector, block);
285         }
286         dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
287                           sizeof(*priv->ntfy_blocks),
288                           priv->ntfy_blocks, priv->ntfy_block_bus);
289         priv->ntfy_blocks = NULL;
290 abort_with_mgmt_vector:
291         free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
292 abort_with_msix_enabled:
293         pci_disable_msix(priv->pdev);
294 abort_with_msix_vectors:
295         kvfree(priv->msix_vectors);
296         priv->msix_vectors = NULL;
297         return err;
298 }
299
300 static void gve_free_notify_blocks(struct gve_priv *priv)
301 {
302         int i;
303
304         /* Free the irqs */
305         for (i = 0; i < priv->num_ntfy_blks; i++) {
306                 struct gve_notify_block *block = &priv->ntfy_blocks[i];
307                 int msix_idx = i;
308
309                 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
310                                       NULL);
311                 free_irq(priv->msix_vectors[msix_idx].vector, block);
312         }
313         dma_free_coherent(&priv->pdev->dev,
314                           priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
315                           priv->ntfy_blocks, priv->ntfy_block_bus);
316         priv->ntfy_blocks = NULL;
317         free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
318         pci_disable_msix(priv->pdev);
319         kvfree(priv->msix_vectors);
320         priv->msix_vectors = NULL;
321 }
322
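/* Allocate the event counter array, notification blocks and stats report,
 * then hand their DMA addresses to the device over the admin queue.
 */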
323 static int gve_setup_device_resources(struct gve_priv *priv)
324 {
325         int err;
326
327         err = gve_alloc_counter_array(priv);
328         if (err)
329                 return err;
330         err = gve_alloc_notify_blocks(priv);
331         if (err)
332                 goto abort_with_counter;
333         err = gve_alloc_stats_report(priv);
334         if (err)
335                 goto abort_with_ntfy_blocks;
336         err = gve_adminq_configure_device_resources(priv,
337                                                     priv->counter_array_bus,
338                                                     priv->num_event_counters,
339                                                     priv->ntfy_block_bus,
340                                                     priv->num_ntfy_blks);
341         if (unlikely(err)) {
342                 dev_err(&priv->pdev->dev,
343                         "could not setup device_resources: err=%d\n", err);
344                 err = -ENXIO;
345                 goto abort_with_stats_report;
346         }
347         err = gve_adminq_report_stats(priv, priv->stats_report_len,
348                                       priv->stats_report_bus,
349                                       GVE_STATS_REPORT_TIMER_PERIOD);
350         if (err)
351                 dev_err(&priv->pdev->dev,
352                         "Failed to report stats: err=%d\n", err);
353         gve_set_device_resources_ok(priv);
354         return 0;
355 abort_with_stats_report:
356         gve_free_stats_report(priv);
357 abort_with_ntfy_blocks:
358         gve_free_notify_blocks(priv);
359 abort_with_counter:
360         gve_free_counter_array(priv);
361         return err;
362 }
363
364 static void gve_trigger_reset(struct gve_priv *priv);
365
366 static void gve_teardown_device_resources(struct gve_priv *priv)
367 {
368         int err;
369
370         /* Tell device its resources are being freed */
371         if (gve_get_device_resources_ok(priv)) {
372                 /* detach the stats report */
373                 err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
374                 if (err) {
375                         dev_err(&priv->pdev->dev,
376                                 "Failed to detach stats report: err=%d\n", err);
377                         gve_trigger_reset(priv);
378                 }
379                 err = gve_adminq_deconfigure_device_resources(priv);
380                 if (err) {
381                         dev_err(&priv->pdev->dev,
382                                 "Could not deconfigure device resources: err=%d\n",
383                                 err);
384                         gve_trigger_reset(priv);
385                 }
386         }
387         gve_free_counter_array(priv);
388         gve_free_notify_blocks(priv);
389         gve_free_stats_report(priv);
390         gve_clear_device_resources_ok(priv);
391 }
392
393 static void gve_add_napi(struct gve_priv *priv, int ntfy_idx)
394 {
395         struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
396
397         netif_napi_add(priv->dev, &block->napi, gve_napi_poll,
398                        NAPI_POLL_WEIGHT);
399 }
400
401 static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
402 {
403         struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
404
405         netif_napi_del(&block->napi);
406 }
407
408 static int gve_register_qpls(struct gve_priv *priv)
409 {
410         int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
411         int err;
412         int i;
413
414         for (i = 0; i < num_qpls; i++) {
415                 err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
416                 if (err) {
417                         netif_err(priv, drv, priv->dev,
418                                   "failed to register queue page list %d\n",
419                                   priv->qpls[i].id);
420                         /* This failure will trigger a reset - no need to clean
421                          * up
422                          */
423                         return err;
424                 }
425         }
426         return 0;
427 }
428
429 static int gve_unregister_qpls(struct gve_priv *priv)
430 {
431         int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
432         int err;
433         int i;
434
435         for (i = 0; i < num_qpls; i++) {
436                 err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
437                 /* This failure will trigger a reset - no need to clean up */
438                 if (err) {
439                         netif_err(priv, drv, priv->dev,
440                                   "Failed to unregister queue page list %d\n",
441                                   priv->qpls[i].id);
442                         return err;
443                 }
444         }
445         return 0;
446 }
447
448 static int gve_create_rings(struct gve_priv *priv)
449 {
450         int err;
451         int i;
452
453         err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
454         if (err) {
455                 netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
456                           priv->tx_cfg.num_queues);
457                 /* This failure will trigger a reset - no need to clean
458                  * up
459                  */
460                 return err;
461         }
462         netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
463                   priv->tx_cfg.num_queues);
464
465         err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
466         if (err) {
467                 netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
468                           priv->rx_cfg.num_queues);
469                 /* This failure will trigger a reset - no need to clean
470                  * up
471                  */
472                 return err;
473         }
474         netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
475                   priv->rx_cfg.num_queues);
476
477         /* Rx data ring has been prefilled with packet buffers at queue
478          * allocation time.
479          * Write the doorbell to provide descriptor slots and packet buffers
480          * to the NIC.
481          */
482         for (i = 0; i < priv->rx_cfg.num_queues; i++)
483                 gve_rx_write_doorbell(priv, &priv->rx[i]);
484
485         return 0;
486 }
487
488 static int gve_alloc_rings(struct gve_priv *priv)
489 {
490         int ntfy_idx;
491         int err;
492         int i;
493
494         /* Setup tx rings */
495         priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
496                             GFP_KERNEL);
497         if (!priv->tx)
498                 return -ENOMEM;
499         err = gve_tx_alloc_rings(priv);
500         if (err)
501                 goto free_tx;
502         /* Setup rx rings */
503         priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
504                             GFP_KERNEL);
505         if (!priv->rx) {
506                 err = -ENOMEM;
507                 goto free_tx_queue;
508         }
509         err = gve_rx_alloc_rings(priv);
510         if (err)
511                 goto free_rx;
512         /* Add tx napi & init sync stats */
513         for (i = 0; i < priv->tx_cfg.num_queues; i++) {
514                 u64_stats_init(&priv->tx[i].statss);
515                 ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
516                 gve_add_napi(priv, ntfy_idx);
517         }
518         /* Add rx napi & init sync stats */
519         for (i = 0; i < priv->rx_cfg.num_queues; i++) {
520                 u64_stats_init(&priv->rx[i].statss);
521                 ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
522                 gve_add_napi(priv, ntfy_idx);
523         }
524
525         return 0;
526
527 free_rx:
528         kvfree(priv->rx);
529         priv->rx = NULL;
530 free_tx_queue:
531         gve_tx_free_rings(priv);
532 free_tx:
533         kvfree(priv->tx);
534         priv->tx = NULL;
535         return err;
536 }
537
538 static int gve_destroy_rings(struct gve_priv *priv)
539 {
540         int err;
541
542         err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
543         if (err) {
544                 netif_err(priv, drv, priv->dev,
545                           "failed to destroy tx queues\n");
546                 /* This failure will trigger a reset - no need to clean up */
547                 return err;
548         }
549         netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
550         err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
551         if (err) {
552                 netif_err(priv, drv, priv->dev,
553                           "failed to destroy rx queues\n");
554                 /* This failure will trigger a reset - no need to clean up */
555                 return err;
556         }
557         netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
558         return 0;
559 }
560
561 static void gve_free_rings(struct gve_priv *priv)
562 {
563         int ntfy_idx;
564         int i;
565
566         if (priv->tx) {
567                 for (i = 0; i < priv->tx_cfg.num_queues; i++) {
568                         ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
569                         gve_remove_napi(priv, ntfy_idx);
570                 }
571                 gve_tx_free_rings(priv);
572                 kvfree(priv->tx);
573                 priv->tx = NULL;
574         }
575         if (priv->rx) {
576                 for (i = 0; i < priv->rx_cfg.num_queues; i++) {
577                         ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
578                         gve_remove_napi(priv, ntfy_idx);
579                 }
580                 gve_rx_free_rings(priv);
581                 kvfree(priv->rx);
582                 priv->rx = NULL;
583         }
584 }
585
586 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
587                    struct page **page, dma_addr_t *dma,
588                    enum dma_data_direction dir)
589 {
590         *page = alloc_page(GFP_KERNEL);
591         if (!*page) {
592                 priv->page_alloc_fail++;
593                 return -ENOMEM;
594         }
595         *dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
596         if (dma_mapping_error(dev, *dma)) {
597                 priv->dma_mapping_error++;
598                 put_page(*page);
599                 return -ENOMEM;
600         }
601         return 0;
602 }
603
604 static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
605                                      int pages)
606 {
607         struct gve_queue_page_list *qpl = &priv->qpls[id];
608         int err;
609         int i;
610
611         if (pages + priv->num_registered_pages > priv->max_registered_pages) {
612                 netif_err(priv, drv, priv->dev,
613                           "Reached max number of registered pages %llu > %llu\n",
614                           pages + priv->num_registered_pages,
615                           priv->max_registered_pages);
616                 return -EINVAL;
617         }
618
619         qpl->id = id;
620         qpl->num_entries = 0;
621         qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
622         /* caller handles clean up */
623         if (!qpl->pages)
624                 return -ENOMEM;
625         qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),
626                                    GFP_KERNEL);
627         /* caller handles clean up */
628         if (!qpl->page_buses)
629                 return -ENOMEM;
630
631         for (i = 0; i < pages; i++) {
632                 err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
633                                      &qpl->page_buses[i],
634                                      gve_qpl_dma_dir(priv, id));
635                 /* caller handles clean up */
636                 if (err)
637                         return -ENOMEM;
638                 qpl->num_entries++;
639         }
640         priv->num_registered_pages += pages;
641
642         return 0;
643 }
644
645 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
646                    enum dma_data_direction dir)
647 {
648         if (!dma_mapping_error(dev, dma))
649                 dma_unmap_page(dev, dma, PAGE_SIZE, dir);
650         if (page)
651                 put_page(page);
652 }
653
654 static void gve_free_queue_page_list(struct gve_priv *priv,
655                                      int id)
656 {
657         struct gve_queue_page_list *qpl = &priv->qpls[id];
658         int i;
659
660         if (!qpl->pages)
661                 return;
662         if (!qpl->page_buses)
663                 goto free_pages;
664
665         for (i = 0; i < qpl->num_entries; i++)
666                 gve_free_page(&priv->pdev->dev, qpl->pages[i],
667                               qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
668
669         kvfree(qpl->page_buses);
670 free_pages:
671         kvfree(qpl->pages);
672         priv->num_registered_pages -= qpl->num_entries;
673 }
674
675 static int gve_alloc_qpls(struct gve_priv *priv)
676 {
677         int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
678         int i, j;
679         int err;
680
681         /* Raw addressing means no QPLs */
682         if (priv->raw_addressing)
683                 return 0;
684
685         priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
686         if (!priv->qpls)
687                 return -ENOMEM;
688
689         for (i = 0; i < gve_num_tx_qpls(priv); i++) {
690                 err = gve_alloc_queue_page_list(priv, i,
691                                                 priv->tx_pages_per_qpl);
692                 if (err)
693                         goto free_qpls;
694         }
695         for (; i < num_qpls; i++) {
696                 err = gve_alloc_queue_page_list(priv, i,
697                                                 priv->rx_data_slot_cnt);
698                 if (err)
699                         goto free_qpls;
700         }
701
702         priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
703                                      sizeof(unsigned long) * BITS_PER_BYTE;
704         priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) *
705                                             sizeof(unsigned long), GFP_KERNEL);
706         if (!priv->qpl_cfg.qpl_id_map) {
707                 err = -ENOMEM;
708                 goto free_qpls;
709         }
710
711         return 0;
712
713 free_qpls:
714         for (j = 0; j <= i; j++)
715                 gve_free_queue_page_list(priv, j);
716         kvfree(priv->qpls);
717         return err;
718 }
719
720 static void gve_free_qpls(struct gve_priv *priv)
721 {
722         int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
723         int i;
724
725         /* Raw addressing means no QPLs */
726         if (priv->raw_addressing)
727                 return;
728
729         kvfree(priv->qpl_cfg.qpl_id_map);
730
731         for (i = 0; i < num_qpls; i++)
732                 gve_free_queue_page_list(priv, i);
733
734         kvfree(priv->qpls);
735 }
736
737 /* Use this to schedule a reset when the device is capable of continuing
738  * to handle other requests in its current state. If it is not, do a reset
739  * in thread instead.
740  */
741 void gve_schedule_reset(struct gve_priv *priv)
742 {
743         gve_set_do_reset(priv);
744         queue_work(priv->gve_wq, &priv->service_task);
745 }
746
747 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
748 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
749 static void gve_turndown(struct gve_priv *priv);
750 static void gve_turnup(struct gve_priv *priv);
751
752 static int gve_open(struct net_device *dev)
753 {
754         struct gve_priv *priv = netdev_priv(dev);
755         int err;
756
757         err = gve_alloc_qpls(priv);
758         if (err)
759                 return err;
760         err = gve_alloc_rings(priv);
761         if (err)
762                 goto free_qpls;
763
764         err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
765         if (err)
766                 goto free_rings;
767         err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
768         if (err)
769                 goto free_rings;
770
771         err = gve_register_qpls(priv);
772         if (err)
773                 goto reset;
774         err = gve_create_rings(priv);
775         if (err)
776                 goto reset;
777         gve_set_device_rings_ok(priv);
778
779         if (gve_get_report_stats(priv))
780                 mod_timer(&priv->stats_report_timer,
781                           round_jiffies(jiffies +
782                                 msecs_to_jiffies(priv->stats_report_timer_period)));
783
784         gve_turnup(priv);
785         queue_work(priv->gve_wq, &priv->service_task);
786         priv->interface_up_cnt++;
787         return 0;
788
789 free_rings:
790         gve_free_rings(priv);
791 free_qpls:
792         gve_free_qpls(priv);
793         return err;
794
795 reset:
796         /* This must have been called from a reset due to the rtnl lock
797          * so just return at this point.
798          */
799         if (gve_get_reset_in_progress(priv))
800                 return err;
801         /* Otherwise reset before returning */
802         gve_reset_and_teardown(priv, true);
803         /* if this fails there is nothing we can do so just ignore the return */
804         gve_reset_recovery(priv, false);
805         /* return the original error */
806         return err;
807 }
808
809 static int gve_close(struct net_device *dev)
810 {
811         struct gve_priv *priv = netdev_priv(dev);
812         int err;
813
814         netif_carrier_off(dev);
815         if (gve_get_device_rings_ok(priv)) {
816                 gve_turndown(priv);
817                 err = gve_destroy_rings(priv);
818                 if (err)
819                         goto err;
820                 err = gve_unregister_qpls(priv);
821                 if (err)
822                         goto err;
823                 gve_clear_device_rings_ok(priv);
824         }
825         del_timer_sync(&priv->stats_report_timer);
826
827         gve_free_rings(priv);
828         gve_free_qpls(priv);
829         priv->interface_down_cnt++;
830         return 0;
831
832 err:
833         /* This must have been called from a reset due to the rtnl lock
834          * so just return at this point.
835          */
836         if (gve_get_reset_in_progress(priv))
837                 return err;
838         /* Otherwise reset before returning */
839         gve_reset_and_teardown(priv, true);
840         return gve_reset_recovery(priv, false);
841 }
842
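/* Apply a new TX/RX queue configuration. If the interface is up, close the
 * device and reopen it with the new configuration; otherwise the new
 * configuration simply takes effect on the next open.
 */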
843 int gve_adjust_queues(struct gve_priv *priv,
844                       struct gve_queue_config new_rx_config,
845                       struct gve_queue_config new_tx_config)
846 {
847         int err;
848
849         if (netif_carrier_ok(priv->dev)) {
850                 /* To make this process as simple as possible we teardown the
851                  * device, set the new configuration, and then bring the device
852                  * up again.
853                  */
854                 err = gve_close(priv->dev);
855                 /* we have already tried to reset in close,
856                  * just fail at this point
857                  */
858                 if (err)
859                         return err;
860                 priv->tx_cfg = new_tx_config;
861                 priv->rx_cfg = new_rx_config;
862
863                 err = gve_open(priv->dev);
864                 if (err)
865                         goto err;
866
867                 return 0;
868         }
869         /* Set the config for the next up. */
870         priv->tx_cfg = new_tx_config;
871         priv->rx_cfg = new_rx_config;
872
873         return 0;
874 err:
875         netif_err(priv, drv, priv->dev,
876                   "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
877         gve_turndown(priv);
878         return err;
879 }
880
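/* Quiesce the data path: drop the carrier, disable NAPI on every notification
 * block and stop the TX queues so no new work is accepted.
 */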
881 static void gve_turndown(struct gve_priv *priv)
882 {
883         int idx;
884
885         if (netif_carrier_ok(priv->dev))
886                 netif_carrier_off(priv->dev);
887
888         if (!gve_get_napi_enabled(priv))
889                 return;
890
891         /* Disable napi to prevent more work from coming in */
892         for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
893                 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
894                 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
895
896                 napi_disable(&block->napi);
897         }
898         for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
899                 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
900                 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
901
902                 napi_disable(&block->napi);
903         }
904
905         /* Stop tx queues */
906         netif_tx_disable(priv->dev);
907
908         gve_clear_napi_enabled(priv);
909         gve_clear_report_stats(priv);
910 }
911
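/* Restart the data path: start the TX queues, re-enable NAPI and unmask the
 * per-block interrupts by writing 0 to their irq doorbells.
 */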
912 static void gve_turnup(struct gve_priv *priv)
913 {
914         int idx;
915
916         /* Start the tx queues */
917         netif_tx_start_all_queues(priv->dev);
918
919         /* Enable napi and unmask interrupts for all queues */
920         for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
921                 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
922                 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
923
924                 napi_enable(&block->napi);
925                 iowrite32be(0, gve_irq_doorbell(priv, block));
926         }
927         for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
928                 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
929                 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
930
931                 napi_enable(&block->napi);
932                 iowrite32be(0, gve_irq_doorbell(priv, block));
933         }
934
935         gve_set_napi_enabled(priv);
936 }
937
938 static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
939 {
940         struct gve_priv *priv = netdev_priv(dev);
941
942         gve_schedule_reset(priv);
943         priv->tx_timeo_cnt++;
944 }
945
946 static const struct net_device_ops gve_netdev_ops = {
947         .ndo_start_xmit         =       gve_tx,
948         .ndo_open               =       gve_open,
949         .ndo_stop               =       gve_close,
950         .ndo_get_stats64        =       gve_get_stats,
951         .ndo_tx_timeout         =       gve_tx_timeout,
952 };
953
954 static void gve_handle_status(struct gve_priv *priv, u32 status)
955 {
956         if (GVE_DEVICE_STATUS_RESET_MASK & status) {
957                 dev_info(&priv->pdev->dev, "Device requested reset.\n");
958                 gve_set_do_reset(priv);
959         }
960         if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
961                 priv->stats_report_trigger_cnt++;
962                 gve_set_do_report_stats(priv);
963         }
964 }
965
966 static void gve_handle_reset(struct gve_priv *priv)
967 {
968         /* A service task will be scheduled at the end of probe to catch any
969          * resets that need to happen, and we don't want to reset until
970          * probe is done.
971          */
972         if (gve_get_probe_in_progress(priv))
973                 return;
974
975         if (gve_get_do_reset(priv)) {
976                 rtnl_lock();
977                 gve_reset(priv, false);
978                 rtnl_unlock();
979         }
980 }
981
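/* Fill the stats report buffer shared with the device with the current
 * per-queue TX and RX statistics and increment its written_count.
 */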
982 void gve_handle_report_stats(struct gve_priv *priv)
983 {
984         int idx, stats_idx = 0;
        u64 tx_bytes;
985         unsigned int start = 0;
986         struct stats *stats = priv->stats_report->stats;
987
988         if (!gve_get_report_stats(priv))
989                 return;
990
991         be64_add_cpu(&priv->stats_report->written_count, 1);
992         /* tx stats */
993         if (priv->tx) {
994                 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
995                         do {
996                                 start = u64_stats_fetch_begin(&priv->tx[idx].statss);
997                                 tx_bytes = priv->tx[idx].bytes_done;
998                         } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
999                         stats[stats_idx++] = (struct stats) {
1000                                 .stat_name = cpu_to_be32(TX_WAKE_CNT),
1001                                 .value = cpu_to_be64(priv->tx[idx].wake_queue),
1002                                 .queue_id = cpu_to_be32(idx),
1003                         };
1004                         stats[stats_idx++] = (struct stats) {
1005                                 .stat_name = cpu_to_be32(TX_STOP_CNT),
1006                                 .value = cpu_to_be64(priv->tx[idx].stop_queue),
1007                                 .queue_id = cpu_to_be32(idx),
1008                         };
1009                         stats[stats_idx++] = (struct stats) {
1010                                 .stat_name = cpu_to_be32(TX_FRAMES_SENT),
1011                                 .value = cpu_to_be64(priv->tx[idx].req),
1012                                 .queue_id = cpu_to_be32(idx),
1013                         };
1014                         stats[stats_idx++] = (struct stats) {
1015                                 .stat_name = cpu_to_be32(TX_BYTES_SENT),
1016                                 .value = cpu_to_be64(tx_bytes),
1017                                 .queue_id = cpu_to_be32(idx),
1018                         };
1019                         stats[stats_idx++] = (struct stats) {
1020                                 .stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
1021                                 .value = cpu_to_be64(priv->tx[idx].done),
1022                                 .queue_id = cpu_to_be32(idx),
1023                         };
1024                 }
1025         }
1026         /* rx stats */
1027         if (priv->rx) {
1028                 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1029                         stats[stats_idx++] = (struct stats) {
1030                                 .stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
1031                                 .value = cpu_to_be64(priv->rx[idx].desc.seqno),
1032                                 .queue_id = cpu_to_be32(idx),
1033                         };
1034                         stats[stats_idx++] = (struct stats) {
1035                                 .stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
1036                                 .value = cpu_to_be64(priv->rx[idx].fill_cnt),
1037                                 .queue_id = cpu_to_be32(idx),
1038                         };
1039                 }
1040         }
1041 }
1042
1043 static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
1044 {
1045         if (!gve_get_napi_enabled(priv))
1046                 return;
1047
1048         if (link_status == netif_carrier_ok(priv->dev))
1049                 return;
1050
1051         if (link_status) {
1052                 netdev_info(priv->dev, "Device link is up.\n");
1053                 netif_carrier_on(priv->dev);
1054         } else {
1055                 netdev_info(priv->dev, "Device link is down.\n");
1056                 netif_carrier_off(priv->dev);
1057         }
1058 }
1059
1060 /* Handle NIC status register changes, reset requests and report stats */
1061 static void gve_service_task(struct work_struct *work)
1062 {
1063         struct gve_priv *priv = container_of(work, struct gve_priv,
1064                                              service_task);
1065         u32 status = ioread32be(&priv->reg_bar0->device_status);
1066
1067         gve_handle_status(priv, status);
1068
1069         gve_handle_reset(priv);
1070         gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
1071 }
1072
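/* Allocate the admin queue, query the device description (skipped when called
 * from reset recovery), size the notification block and queue configuration
 * from the available MSI-X vectors, and set up device resources.
 */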
1073 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
1074 {
1075         int num_ntfy;
1076         int err;
1077
1078         /* Set up the adminq */
1079         err = gve_adminq_alloc(&priv->pdev->dev, priv);
1080         if (err) {
1081                 dev_err(&priv->pdev->dev,
1082                         "Failed to alloc admin queue: err=%d\n", err);
1083                 return err;
1084         }
1085
1086         if (skip_describe_device)
1087                 goto setup_device;
1088
1089         priv->raw_addressing = false;
1090         /* Get the initial information we need from the device */
1091         err = gve_adminq_describe_device(priv);
1092         if (err) {
1093                 dev_err(&priv->pdev->dev,
1094                         "Could not get device information: err=%d\n", err);
1095                 goto err;
1096         }
1097         if (priv->dev->max_mtu > PAGE_SIZE) {
1098                 priv->dev->max_mtu = PAGE_SIZE;
1099                 err = gve_adminq_set_mtu(priv, priv->dev->mtu);
1100                 if (err) {
1101                         dev_err(&priv->pdev->dev, "Could not set mtu\n");
1102                         goto err;
1103                 }
1104         }
1105         priv->dev->mtu = priv->dev->max_mtu;
1106         num_ntfy = pci_msix_vec_count(priv->pdev);
1107         if (num_ntfy <= 0) {
1108                 dev_err(&priv->pdev->dev,
1109                         "could not count MSI-x vectors: err=%d\n", num_ntfy);
1110                 err = num_ntfy;
1111                 goto err;
1112         } else if (num_ntfy < GVE_MIN_MSIX) {
1113                 dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
1114                         GVE_MIN_MSIX, num_ntfy);
1115                 err = -EINVAL;
1116                 goto err;
1117         }
1118
1119         priv->num_registered_pages = 0;
1120         priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
1121         /* gvnic has one Notification Block per MSI-x vector, except for the
1122          * management vector
1123          */
1124         priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
1125         priv->mgmt_msix_idx = priv->num_ntfy_blks;
1126
1127         priv->tx_cfg.max_queues =
1128                 min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
1129         priv->rx_cfg.max_queues =
1130                 min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);
1131
1132         priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
1133         priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
1134         if (priv->default_num_queues > 0) {
1135                 priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
1136                                                 priv->tx_cfg.num_queues);
1137                 priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
1138                                                 priv->rx_cfg.num_queues);
1139         }
1140
1141         dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
1142                  priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
1143         dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
1144                  priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
1145
1146 setup_device:
1147         err = gve_setup_device_resources(priv);
1148         if (!err)
1149                 return 0;
1150 err:
1151         gve_adminq_free(&priv->pdev->dev, priv);
1152         return err;
1153 }
1154
1155 static void gve_teardown_priv_resources(struct gve_priv *priv)
1156 {
1157         gve_teardown_device_resources(priv);
1158         gve_adminq_free(&priv->pdev->dev, priv);
1159 }
1160
1161 static void gve_trigger_reset(struct gve_priv *priv)
1162 {
1163         /* Reset the device by releasing the AQ */
1164         gve_adminq_release(priv);
1165 }
1166
1167 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
1168 {
1169         gve_trigger_reset(priv);
1170         /* With the reset having already happened, close cannot fail */
1171         if (was_up)
1172                 gve_close(priv->dev);
1173         gve_teardown_priv_resources(priv);
1174 }
1175
1176 static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
1177 {
1178         int err;
1179
1180         err = gve_init_priv(priv, true);
1181         if (err)
1182                 goto err;
1183         if (was_up) {
1184                 err = gve_open(priv->dev);
1185                 if (err)
1186                         goto err;
1187         }
1188         return 0;
1189 err:
1190         dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
1191         gve_turndown(priv);
1192         return err;
1193 }
1194
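/* Perform a full device reset: tear the device down (attempting a normal
 * close first when requested), then re-initialize and, if the interface was
 * up, reopen it. If recovery fails, all queues are left disabled.
 */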
1195 int gve_reset(struct gve_priv *priv, bool attempt_teardown)
1196 {
1197         bool was_up = netif_carrier_ok(priv->dev);
1198         int err;
1199
1200         dev_info(&priv->pdev->dev, "Performing reset\n");
1201         gve_clear_do_reset(priv);
1202         gve_set_reset_in_progress(priv);
1203         /* If we aren't attempting to teardown normally, just go turndown and
1204          * reset right away.
1205          */
1206         if (!attempt_teardown) {
1207                 gve_turndown(priv);
1208                 gve_reset_and_teardown(priv, was_up);
1209         } else {
1210                 /* Otherwise attempt to close normally */
1211                 if (was_up) {
1212                         err = gve_close(priv->dev);
1213                         /* If that fails reset as we did above */
1214                         if (err)
1215                                 gve_reset_and_teardown(priv, was_up);
1216                 }
1217                 /* Clean up any remaining resources */
1218                 gve_teardown_priv_resources(priv);
1219         }
1220
1221         /* Set it all back up */
1222         err = gve_reset_recovery(priv, was_up);
1223         gve_clear_reset_in_progress(priv);
1224         priv->reset_cnt++;
1225         priv->interface_up_cnt = 0;
1226         priv->interface_down_cnt = 0;
1227         priv->stats_report_trigger_cnt = 0;
1228         return err;
1229 }
1230
1231 static void gve_write_version(u8 __iomem *driver_version_register)
1232 {
1233         const char *c = gve_version_prefix;
1234
1235         while (*c) {
1236                 writeb(*c, driver_version_register);
1237                 c++;
1238         }
1239
1240         c = gve_version_str;
1241         while (*c) {
1242                 writeb(*c, driver_version_register);
1243                 c++;
1244         }
1245         writeb('\n', driver_version_register);
1246 }
1247
1248 static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1249 {
1250         int max_tx_queues, max_rx_queues;
1251         struct net_device *dev;
1252         __be32 __iomem *db_bar;
1253         struct gve_registers __iomem *reg_bar;
1254         struct gve_priv *priv;
1255         int err;
1256
1257         err = pci_enable_device(pdev);
1258         if (err)
1259                 return err;
1260
1261         err = pci_request_regions(pdev, "gvnic-cfg");
1262         if (err)
1263                 goto abort_with_enabled;
1264
1265         pci_set_master(pdev);
1266
1267         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1268         if (err) {
1269                 dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
1270                 goto abort_with_pci_region;
1271         }
1272
1273         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1274         if (err) {
1275                 dev_err(&pdev->dev,
1276                         "Failed to set consistent dma mask: err=%d\n", err);
1277                 goto abort_with_pci_region;
1278         }
1279
1280         reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
1281         if (!reg_bar) {
1282                 dev_err(&pdev->dev, "Failed to map pci bar!\n");
1283                 err = -ENOMEM;
1284                 goto abort_with_pci_region;
1285         }
1286
1287         db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
1288         if (!db_bar) {
1289                 dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
1290                 err = -ENOMEM;
1291                 goto abort_with_reg_bar;
1292         }
1293
1294         gve_write_version(&reg_bar->driver_version);
1295         /* Get max queues to alloc etherdev */
1296         max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
1297         max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
1298         /* Alloc and setup the netdev and priv */
1299         dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
1300         if (!dev) {
1301                 dev_err(&pdev->dev, "could not allocate netdev\n");
                err = -ENOMEM;
1302                 goto abort_with_db_bar;
1303         }
1304         SET_NETDEV_DEV(dev, &pdev->dev);
1305         pci_set_drvdata(pdev, dev);
1306         dev->ethtool_ops = &gve_ethtool_ops;
1307         dev->netdev_ops = &gve_netdev_ops;
1308         /* advertise features */
1309         dev->hw_features = NETIF_F_HIGHDMA;
1310         dev->hw_features |= NETIF_F_SG;
1311         dev->hw_features |= NETIF_F_HW_CSUM;
1312         dev->hw_features |= NETIF_F_TSO;
1313         dev->hw_features |= NETIF_F_TSO6;
1314         dev->hw_features |= NETIF_F_TSO_ECN;
1315         dev->hw_features |= NETIF_F_RXCSUM;
1316         dev->hw_features |= NETIF_F_RXHASH;
1317         dev->features = dev->hw_features;
1318         dev->watchdog_timeo = 5 * HZ;
1319         dev->min_mtu = ETH_MIN_MTU;
1320         netif_carrier_off(dev);
1321
1322         priv = netdev_priv(dev);
1323         priv->dev = dev;
1324         priv->pdev = pdev;
1325         priv->msg_enable = DEFAULT_MSG_LEVEL;
1326         priv->reg_bar0 = reg_bar;
1327         priv->db_bar2 = db_bar;
1328         priv->service_task_flags = 0x0;
1329         priv->state_flags = 0x0;
1330         priv->ethtool_flags = 0x0;
1331
1332         gve_set_probe_in_progress(priv);
1333         priv->gve_wq = alloc_ordered_workqueue("gve", 0);
1334         if (!priv->gve_wq) {
1335                 dev_err(&pdev->dev, "Could not allocate workqueue");
1336                 err = -ENOMEM;
1337                 goto abort_with_netdev;
1338         }
1339         INIT_WORK(&priv->service_task, gve_service_task);
1340         INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
1341         priv->tx_cfg.max_queues = max_tx_queues;
1342         priv->rx_cfg.max_queues = max_rx_queues;
1343
1344         err = gve_init_priv(priv, false);
1345         if (err)
1346                 goto abort_with_wq;
1347
1348         err = register_netdev(dev);
1349         if (err)
1350                 goto abort_with_wq;
1351
1352         dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
1353         gve_clear_probe_in_progress(priv);
1354         queue_work(priv->gve_wq, &priv->service_task);
1355         return 0;
1356
1357 abort_with_wq:
1358         destroy_workqueue(priv->gve_wq);
1359
1360 abort_with_netdev:
1361         free_netdev(dev);
1362
1363 abort_with_db_bar:
1364         pci_iounmap(pdev, db_bar);
1365
1366 abort_with_reg_bar:
1367         pci_iounmap(pdev, reg_bar);
1368
1369 abort_with_pci_region:
1370         pci_release_regions(pdev);
1371
1372 abort_with_enabled:
1373         pci_disable_device(pdev);
1374         return err;
1375 }
1376
1377 static void gve_remove(struct pci_dev *pdev)
1378 {
1379         struct net_device *netdev = pci_get_drvdata(pdev);
1380         struct gve_priv *priv = netdev_priv(netdev);
1381         __be32 __iomem *db_bar = priv->db_bar2;
1382         void __iomem *reg_bar = priv->reg_bar0;
1383
1384         unregister_netdev(netdev);
1385         gve_teardown_priv_resources(priv);
1386         destroy_workqueue(priv->gve_wq);
1387         free_netdev(netdev);
1388         pci_iounmap(pdev, db_bar);
1389         pci_iounmap(pdev, reg_bar);
1390         pci_release_regions(pdev);
1391         pci_disable_device(pdev);
1392 }
1393
1394 static const struct pci_device_id gve_id_table[] = {
1395         { PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
1396         { }
1397 };
1398
1399 static struct pci_driver gvnic_driver = {
1400         .name           = "gvnic",
1401         .id_table       = gve_id_table,
1402         .probe          = gve_probe,
1403         .remove         = gve_remove,
1404 };
1405
1406 module_pci_driver(gvnic_driver);
1407
1408 MODULE_DEVICE_TABLE(pci, gve_id_table);
1409 MODULE_AUTHOR("Google, Inc.");
1410 MODULE_DESCRIPTION("gVNIC Driver");
1411 MODULE_LICENSE("Dual MIT/GPL");
1412 MODULE_VERSION(GVE_VERSION);