gve: Add stats for gve.
[linux-2.6-microblaze.git] drivers/net/ethernet/google/gve/gve_main.c
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2019 Google, Inc.
5  */
6
7 #include <linux/cpumask.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/sched.h>
13 #include <linux/timer.h>
14 #include <linux/workqueue.h>
15 #include <net/sch_generic.h>
16 #include "gve.h"
17 #include "gve_adminq.h"
18 #include "gve_register.h"
19
20 #define GVE_DEFAULT_RX_COPYBREAK        (256)
21
22 #define DEFAULT_MSG_LEVEL       (NETIF_MSG_DRV | NETIF_MSG_LINK)
23 #define GVE_VERSION             "1.0.0"
24 #define GVE_VERSION_PREFIX      "GVE-"
25
26 const char gve_version_str[] = GVE_VERSION;
27 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
28
29 static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
30 {
31         struct gve_priv *priv = netdev_priv(dev);
32         unsigned int start;
33         int ring;
34
35         if (priv->rx) {
36                 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
37                         do {
38                                 start =
39                                   u64_stats_fetch_begin(&priv->rx[ring].statss);
40                                 s->rx_packets += priv->rx[ring].rpackets;
41                                 s->rx_bytes += priv->rx[ring].rbytes;
42                         } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
43                                                        start));
44                 }
45         }
46         if (priv->tx) {
47                 for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
48                         do {
49                                 start =
50                                   u64_stats_fetch_begin(&priv->tx[ring].statss);
51                                 s->tx_packets += priv->tx[ring].pkt_done;
52                                 s->tx_bytes += priv->tx[ring].bytes_done;
53                         } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
54                                                        start));
55                 }
56         }
57 }
58
59 static int gve_alloc_counter_array(struct gve_priv *priv)
60 {
61         priv->counter_array =
62                 dma_alloc_coherent(&priv->pdev->dev,
63                                    priv->num_event_counters *
64                                    sizeof(*priv->counter_array),
65                                    &priv->counter_array_bus, GFP_KERNEL);
66         if (!priv->counter_array)
67                 return -ENOMEM;
68
69         return 0;
70 }
71
72 static void gve_free_counter_array(struct gve_priv *priv)
73 {
74         dma_free_coherent(&priv->pdev->dev,
75                           priv->num_event_counters *
76                           sizeof(*priv->counter_array),
77                           priv->counter_array, priv->counter_array_bus);
78         priv->counter_array = NULL;
79 }
80
81 static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
82 {
83         struct gve_priv *priv = arg;
84
85         queue_work(priv->gve_wq, &priv->service_task);
86         return IRQ_HANDLED;
87 }
88
89 static irqreturn_t gve_intr(int irq, void *arg)
90 {
91         struct gve_notify_block *block = arg;
92         struct gve_priv *priv = block->priv;
93
94         iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
95         napi_schedule_irqoff(&block->napi);
96         return IRQ_HANDLED;
97 }
98
99 static int gve_napi_poll(struct napi_struct *napi, int budget)
100 {
101         struct gve_notify_block *block;
102         __be32 __iomem *irq_doorbell;
103         bool reschedule = false;
104         struct gve_priv *priv;
105
106         block = container_of(napi, struct gve_notify_block, napi);
107         priv = block->priv;
108
109         if (block->tx)
110                 reschedule |= gve_tx_poll(block, budget);
111         if (block->rx)
112                 reschedule |= gve_rx_poll(block, budget);
113
114         if (reschedule)
115                 return budget;
116
117         napi_complete(napi);
118         irq_doorbell = gve_irq_doorbell(priv, block);
119         iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
120
121         /* Double check we have no extra work.
122          * Ensure unmask synchronizes with checking for work.
123          */
124         dma_rmb();
125         if (block->tx)
126                 reschedule |= gve_tx_poll(block, -1);
127         if (block->rx)
128                 reschedule |= gve_rx_poll(block, -1);
129         if (reschedule && napi_reschedule(napi))
130                 iowrite32be(GVE_IRQ_MASK, irq_doorbell);
131
132         return 0;
133 }
134
135 static int gve_alloc_notify_blocks(struct gve_priv *priv)
136 {
137         int num_vecs_requested = priv->num_ntfy_blks + 1;
138         char *name = priv->dev->name;
139         unsigned int active_cpus;
140         int vecs_enabled;
141         int i, j;
142         int err;
143
144         priv->msix_vectors = kvzalloc(num_vecs_requested *
145                                       sizeof(*priv->msix_vectors), GFP_KERNEL);
146         if (!priv->msix_vectors)
147                 return -ENOMEM;
148         for (i = 0; i < num_vecs_requested; i++)
149                 priv->msix_vectors[i].entry = i;
150         vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
151                                              GVE_MIN_MSIX, num_vecs_requested);
152         if (vecs_enabled < 0) {
153                 dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
154                         GVE_MIN_MSIX, vecs_enabled);
155                 err = vecs_enabled;
156                 goto abort_with_msix_vectors;
157         }
158         if (vecs_enabled != num_vecs_requested) {
159                 int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
160                 int vecs_per_type = new_num_ntfy_blks / 2;
161                 int vecs_left = new_num_ntfy_blks % 2;
162
163                 priv->num_ntfy_blks = new_num_ntfy_blks;
164                 priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
165                                                 vecs_per_type);
166                 priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
167                                                 vecs_per_type + vecs_left);
168                 dev_err(&priv->pdev->dev,
169                         "Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
170                         vecs_enabled, priv->tx_cfg.max_queues,
171                         priv->rx_cfg.max_queues);
172                 if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
173                         priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
174                 if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
175                         priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
176         }
177         /* Half the notification blocks go to TX and half to RX */
178         active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
179
180         /* Setup Management Vector - the last vector */
181         snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
182                  name);
183         err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
184                           gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
185         if (err) {
186                 dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
187                 goto abort_with_msix_enabled;
188         }
189         priv->ntfy_blocks =
190                 dma_alloc_coherent(&priv->pdev->dev,
191                                    priv->num_ntfy_blks *
192                                    sizeof(*priv->ntfy_blocks),
193                                    &priv->ntfy_block_bus, GFP_KERNEL);
194         if (!priv->ntfy_blocks) {
195                 err = -ENOMEM;
196                 goto abort_with_mgmt_vector;
197         }
198         /* Setup the other blocks - the first n-1 vectors */
199         for (i = 0; i < priv->num_ntfy_blks; i++) {
200                 struct gve_notify_block *block = &priv->ntfy_blocks[i];
201                 int msix_idx = i;
202
203                 snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
204                          name, i);
205                 block->priv = priv;
206                 err = request_irq(priv->msix_vectors[msix_idx].vector,
207                                   gve_intr, 0, block->name, block);
208                 if (err) {
209                         dev_err(&priv->pdev->dev,
210                                 "Failed to receive msix vector %d\n", i);
211                         goto abort_with_some_ntfy_blocks;
212                 }
213                 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
214                                       get_cpu_mask(i % active_cpus));
215         }
216         return 0;
217 abort_with_some_ntfy_blocks:
218         for (j = 0; j < i; j++) {
219                 struct gve_notify_block *block = &priv->ntfy_blocks[j];
220                 int msix_idx = j;
221
222                 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
223                                       NULL);
224                 free_irq(priv->msix_vectors[msix_idx].vector, block);
225         }
226         dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
227                           sizeof(*priv->ntfy_blocks),
228                           priv->ntfy_blocks, priv->ntfy_block_bus);
229         priv->ntfy_blocks = NULL;
230 abort_with_mgmt_vector:
231         free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
232 abort_with_msix_enabled:
233         pci_disable_msix(priv->pdev);
234 abort_with_msix_vectors:
235         kvfree(priv->msix_vectors);
236         priv->msix_vectors = NULL;
237         return err;
238 }
239
240 static void gve_free_notify_blocks(struct gve_priv *priv)
241 {
242         int i;
243
244         /* Free the irqs */
245         for (i = 0; i < priv->num_ntfy_blks; i++) {
246                 struct gve_notify_block *block = &priv->ntfy_blocks[i];
247                 int msix_idx = i;
248
249                 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
250                                       NULL);
251                 free_irq(priv->msix_vectors[msix_idx].vector, block);
252         }
253         dma_free_coherent(&priv->pdev->dev,
254                           priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
255                           priv->ntfy_blocks, priv->ntfy_block_bus);
256         priv->ntfy_blocks = NULL;
257         free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
258         pci_disable_msix(priv->pdev);
259         kvfree(priv->msix_vectors);
260         priv->msix_vectors = NULL;
261 }
262
263 static int gve_setup_device_resources(struct gve_priv *priv)
264 {
265         int err;
266
267         err = gve_alloc_counter_array(priv);
268         if (err)
269                 return err;
270         err = gve_alloc_notify_blocks(priv);
271         if (err)
272                 goto abort_with_counter;
273         err = gve_adminq_configure_device_resources(priv,
274                                                     priv->counter_array_bus,
275                                                     priv->num_event_counters,
276                                                     priv->ntfy_block_bus,
277                                                     priv->num_ntfy_blks);
278         if (unlikely(err)) {
279                 dev_err(&priv->pdev->dev,
280                         "could not setup device_resources: err=%d\n", err);
281                 err = -ENXIO;
282                 goto abort_with_ntfy_blocks;
283         }
284         gve_set_device_resources_ok(priv);
285         return 0;
286 abort_with_ntfy_blocks:
287         gve_free_notify_blocks(priv);
288 abort_with_counter:
289         gve_free_counter_array(priv);
290         return err;
291 }
292
293 static void gve_trigger_reset(struct gve_priv *priv);
294
295 static void gve_teardown_device_resources(struct gve_priv *priv)
296 {
297         int err;
298
299         /* Tell device its resources are being freed */
300         if (gve_get_device_resources_ok(priv)) {
301                 err = gve_adminq_deconfigure_device_resources(priv);
302                 if (err) {
303                         dev_err(&priv->pdev->dev,
304                                 "Could not deconfigure device resources: err=%d\n",
305                                 err);
306                         gve_trigger_reset(priv);
307                 }
308         }
309         gve_free_counter_array(priv);
310         gve_free_notify_blocks(priv);
311         gve_clear_device_resources_ok(priv);
312 }
313
314 static void gve_add_napi(struct gve_priv *priv, int ntfy_idx)
315 {
316         struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
317
318         netif_napi_add(priv->dev, &block->napi, gve_napi_poll,
319                        NAPI_POLL_WEIGHT);
320 }
321
322 static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
323 {
324         struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
325
326         netif_napi_del(&block->napi);
327 }
328
329 static int gve_register_qpls(struct gve_priv *priv)
330 {
331         int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
332         int err;
333         int i;
334
335         for (i = 0; i < num_qpls; i++) {
336                 err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
337                 if (err) {
338                         netif_err(priv, drv, priv->dev,
339                                   "failed to register queue page list %d\n",
340                                   priv->qpls[i].id);
341                         /* This failure will trigger a reset - no need to clean
342                          * up
343                          */
344                         return err;
345                 }
346         }
347         return 0;
348 }
349
350 static int gve_unregister_qpls(struct gve_priv *priv)
351 {
352         int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
353         int err;
354         int i;
355
356         for (i = 0; i < num_qpls; i++) {
357                 err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
358                 /* This failure will trigger a reset - no need to clean up */
359                 if (err) {
360                         netif_err(priv, drv, priv->dev,
361                                   "Failed to unregister queue page list %d\n",
362                                   priv->qpls[i].id);
363                         return err;
364                 }
365         }
366         return 0;
367 }
368
369 static int gve_create_rings(struct gve_priv *priv)
370 {
371         int err;
372         int i;
373
374         for (i = 0; i < priv->tx_cfg.num_queues; i++) {
375                 err = gve_adminq_create_tx_queue(priv, i);
376                 if (err) {
377                         netif_err(priv, drv, priv->dev, "failed to create tx queue %d\n",
378                                   i);
379                         /* This failure will trigger a reset - no need to clean
380                          * up
381                          */
382                         return err;
383                 }
384                 netif_dbg(priv, drv, priv->dev, "created tx queue %d\n", i);
385         }
386         for (i = 0; i < priv->rx_cfg.num_queues; i++) {
387                 err = gve_adminq_create_rx_queue(priv, i);
388                 if (err) {
389                         netif_err(priv, drv, priv->dev, "failed to create rx queue %d\n",
390                                   i);
391                         /* This failure will trigger a reset - no need to clean
392                          * up
393                          */
394                         return err;
395                 }
396                 /* Rx data ring has been prefilled with packet buffers at
397                  * queue allocation time.
398                  * Write the doorbell to provide descriptor slots and packet
399                  * buffers to the NIC.
400                  */
401                 gve_rx_write_doorbell(priv, &priv->rx[i]);
402                 netif_dbg(priv, drv, priv->dev, "created rx queue %d\n", i);
403         }
404
405         return 0;
406 }
407
408 static int gve_alloc_rings(struct gve_priv *priv)
409 {
410         int ntfy_idx;
411         int err;
412         int i;
413
414         /* Setup tx rings */
415         priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
416                             GFP_KERNEL);
417         if (!priv->tx)
418                 return -ENOMEM;
419         err = gve_tx_alloc_rings(priv);
420         if (err)
421                 goto free_tx;
422         /* Setup rx rings */
423         priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
424                             GFP_KERNEL);
425         if (!priv->rx) {
426                 err = -ENOMEM;
427                 goto free_tx_queue;
428         }
429         err = gve_rx_alloc_rings(priv);
430         if (err)
431                 goto free_rx;
432         /* Add tx napi & init sync stats */
433         for (i = 0; i < priv->tx_cfg.num_queues; i++) {
434                 u64_stats_init(&priv->tx[i].statss);
435                 ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
436                 gve_add_napi(priv, ntfy_idx);
437         }
438         /* Add rx napi & init sync stats */
439         for (i = 0; i < priv->rx_cfg.num_queues; i++) {
440                 u64_stats_init(&priv->rx[i].statss);
441                 ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
442                 gve_add_napi(priv, ntfy_idx);
443         }
444
445         return 0;
446
447 free_rx:
448         kvfree(priv->rx);
449         priv->rx = NULL;
450 free_tx_queue:
451         gve_tx_free_rings(priv);
452 free_tx:
453         kvfree(priv->tx);
454         priv->tx = NULL;
455         return err;
456 }
457
458 static int gve_destroy_rings(struct gve_priv *priv)
459 {
460         int err;
461         int i;
462
463         for (i = 0; i < priv->tx_cfg.num_queues; i++) {
464                 err = gve_adminq_destroy_tx_queue(priv, i);
465                 if (err) {
466                         netif_err(priv, drv, priv->dev,
467                                   "failed to destroy tx queue %d\n",
468                                   i);
469                         /* This failure will trigger a reset - no need to clean
470                          * up
471                          */
472                         return err;
473                 }
474                 netif_dbg(priv, drv, priv->dev, "destroyed tx queue %d\n", i);
475         }
476         for (i = 0; i < priv->rx_cfg.num_queues; i++) {
477                 err = gve_adminq_destroy_rx_queue(priv, i);
478                 if (err) {
479                         netif_err(priv, drv, priv->dev,
480                                   "failed to destroy rx queue %d\n",
481                                   i);
482                         /* This failure will trigger a reset - no need to clean
483                          * up
484                          */
485                         return err;
486                 }
487                 netif_dbg(priv, drv, priv->dev, "destroyed rx queue %d\n", i);
488         }
489         return 0;
490 }
491
492 static void gve_free_rings(struct gve_priv *priv)
493 {
494         int ntfy_idx;
495         int i;
496
497         if (priv->tx) {
498                 for (i = 0; i < priv->tx_cfg.num_queues; i++) {
499                         ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
500                         gve_remove_napi(priv, ntfy_idx);
501                 }
502                 gve_tx_free_rings(priv);
503                 kvfree(priv->tx);
504                 priv->tx = NULL;
505         }
506         if (priv->rx) {
507                 for (i = 0; i < priv->rx_cfg.num_queues; i++) {
508                         ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
509                         gve_remove_napi(priv, ntfy_idx);
510                 }
511                 gve_rx_free_rings(priv);
512                 kvfree(priv->rx);
513                 priv->rx = NULL;
514         }
515 }
516
517 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
518                    struct page **page, dma_addr_t *dma,
519                    enum dma_data_direction dir)
520 {
521         *page = alloc_page(GFP_KERNEL);
522         if (!*page) {
523                 priv->page_alloc_fail++;
524                 return -ENOMEM;
525         }
526         *dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
527         if (dma_mapping_error(dev, *dma)) {
528                 priv->dma_mapping_error++;
529                 put_page(*page);
530                 return -ENOMEM;
531         }
532         return 0;
533 }
534
535 static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
536                                      int pages)
537 {
538         struct gve_queue_page_list *qpl = &priv->qpls[id];
539         int err;
540         int i;
541
542         if (pages + priv->num_registered_pages > priv->max_registered_pages) {
543                 netif_err(priv, drv, priv->dev,
544                           "Reached max number of registered pages %llu > %llu\n",
545                           pages + priv->num_registered_pages,
546                           priv->max_registered_pages);
547                 return -EINVAL;
548         }
549
550         qpl->id = id;
551         qpl->num_entries = 0;
552         qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
553         /* caller handles clean up */
554         if (!qpl->pages)
555                 return -ENOMEM;
556         qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),
557                                    GFP_KERNEL);
558         /* caller handles clean up */
559         if (!qpl->page_buses)
560                 return -ENOMEM;
561
562         for (i = 0; i < pages; i++) {
563                 err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
564                                      &qpl->page_buses[i],
565                                      gve_qpl_dma_dir(priv, id));
566                 /* caller handles clean up */
567                 if (err)
568                         return -ENOMEM;
569                 qpl->num_entries++;
570         }
571         priv->num_registered_pages += pages;
572
573         return 0;
574 }
575
576 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
577                    enum dma_data_direction dir)
578 {
579         if (!dma_mapping_error(dev, dma))
580                 dma_unmap_page(dev, dma, PAGE_SIZE, dir);
581         if (page)
582                 put_page(page);
583 }
584
585 static void gve_free_queue_page_list(struct gve_priv *priv,
586                                      int id)
587 {
588         struct gve_queue_page_list *qpl = &priv->qpls[id];
589         int i;
590
591         if (!qpl->pages)
592                 return;
593         if (!qpl->page_buses)
594                 goto free_pages;
595
596         for (i = 0; i < qpl->num_entries; i++)
597                 gve_free_page(&priv->pdev->dev, qpl->pages[i],
598                               qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
599
600         kvfree(qpl->page_buses);
601 free_pages:
602         kvfree(qpl->pages);
603         priv->num_registered_pages -= qpl->num_entries;
604 }
605
606 static int gve_alloc_qpls(struct gve_priv *priv)
607 {
608         int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
609         int i, j;
610         int err;
611
612         priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
613         if (!priv->qpls)
614                 return -ENOMEM;
615
616         for (i = 0; i < gve_num_tx_qpls(priv); i++) {
617                 err = gve_alloc_queue_page_list(priv, i,
618                                                 priv->tx_pages_per_qpl);
619                 if (err)
620                         goto free_qpls;
621         }
622         for (; i < num_qpls; i++) {
623                 err = gve_alloc_queue_page_list(priv, i,
624                                                 priv->rx_pages_per_qpl);
625                 if (err)
626                         goto free_qpls;
627         }
628
629         priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
630                                      sizeof(unsigned long) * BITS_PER_BYTE;
631         priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) *
632                                             sizeof(unsigned long), GFP_KERNEL);
633         if (!priv->qpl_cfg.qpl_id_map) {
634                 err = -ENOMEM;
635                 goto free_qpls;
636         }
637
638         return 0;
639
640 free_qpls:
641         for (j = 0; j <= i; j++)
642                 gve_free_queue_page_list(priv, j);
643         kvfree(priv->qpls);
644         return err;
645 }
646
647 static void gve_free_qpls(struct gve_priv *priv)
648 {
649         int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
650         int i;
651
652         kvfree(priv->qpl_cfg.qpl_id_map);
653
654         for (i = 0; i < num_qpls; i++)
655                 gve_free_queue_page_list(priv, i);
656
657         kvfree(priv->qpls);
658 }
659
660 /* Use this to schedule a reset when the device is capable of continuing
661  * to handle other requests in its current state. If it is not, do a reset
662  * in thread instead.
663  */
664 void gve_schedule_reset(struct gve_priv *priv)
665 {
666         gve_set_do_reset(priv);
667         queue_work(priv->gve_wq, &priv->service_task);
668 }
669
670 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
671 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
672 static void gve_turndown(struct gve_priv *priv);
673 static void gve_turnup(struct gve_priv *priv);
674
675 static int gve_open(struct net_device *dev)
676 {
677         struct gve_priv *priv = netdev_priv(dev);
678         int err;
679
680         err = gve_alloc_qpls(priv);
681         if (err)
682                 return err;
683         err = gve_alloc_rings(priv);
684         if (err)
685                 goto free_qpls;
686
687         err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
688         if (err)
689                 goto free_rings;
690         err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
691         if (err)
692                 goto free_rings;
693
694         err = gve_register_qpls(priv);
695         if (err)
696                 goto reset;
697         err = gve_create_rings(priv);
698         if (err)
699                 goto reset;
700         gve_set_device_rings_ok(priv);
701
702         gve_turnup(priv);
703         netif_carrier_on(dev);
704         priv->interface_up_cnt++;
705         return 0;
706
707 free_rings:
708         gve_free_rings(priv);
709 free_qpls:
710         gve_free_qpls(priv);
711         return err;
712
713 reset:
714         /* This must have been called from a reset due to the rtnl lock
715          * so just return at this point.
716          */
717         if (gve_get_reset_in_progress(priv))
718                 return err;
719         /* Otherwise reset before returning */
720         gve_reset_and_teardown(priv, true);
721         /* if this fails there is nothing we can do so just ignore the return */
722         gve_reset_recovery(priv, false);
723         /* return the original error */
724         return err;
725 }
726
727 static int gve_close(struct net_device *dev)
728 {
729         struct gve_priv *priv = netdev_priv(dev);
730         int err;
731
732         netif_carrier_off(dev);
733         if (gve_get_device_rings_ok(priv)) {
734                 gve_turndown(priv);
735                 err = gve_destroy_rings(priv);
736                 if (err)
737                         goto err;
738                 err = gve_unregister_qpls(priv);
739                 if (err)
740                         goto err;
741                 gve_clear_device_rings_ok(priv);
742         }
743
744         gve_free_rings(priv);
745         gve_free_qpls(priv);
746         priv->interface_down_cnt++;
747         return 0;
748
749 err:
750         /* This must have been called from a reset due to the rtnl lock
751          * so just return at this point.
752          */
753         if (gve_get_reset_in_progress(priv))
754                 return err;
755         /* Otherwise reset before returning */
756         gve_reset_and_teardown(priv, true);
757         return gve_reset_recovery(priv, false);
758 }
759
760 int gve_adjust_queues(struct gve_priv *priv,
761                       struct gve_queue_config new_rx_config,
762                       struct gve_queue_config new_tx_config)
763 {
764         int err;
765
766         if (netif_carrier_ok(priv->dev)) {
767                 /* To make this process as simple as possible we teardown the
768                  * device, set the new configuration, and then bring the device
769                  * up again.
770                  */
771                 err = gve_close(priv->dev);
772                 /* we have already tried to reset in close,
773                  * just fail at this point
774                  */
775                 if (err)
776                         return err;
777                 priv->tx_cfg = new_tx_config;
778                 priv->rx_cfg = new_rx_config;
779
780                 err = gve_open(priv->dev);
781                 if (err)
782                         goto err;
783
784                 return 0;
785         }
786         /* Set the config for the next up. */
787         priv->tx_cfg = new_tx_config;
788         priv->rx_cfg = new_rx_config;
789
790         return 0;
791 err:
792         netif_err(priv, drv, priv->dev,
793                   "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
794         gve_turndown(priv);
795         return err;
796 }
797
798 static void gve_turndown(struct gve_priv *priv)
799 {
800         int idx;
801
802         if (netif_carrier_ok(priv->dev))
803                 netif_carrier_off(priv->dev);
804
805         if (!gve_get_napi_enabled(priv))
806                 return;
807
808         /* Disable napi to prevent more work from coming in */
809         for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
810                 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
811                 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
812
813                 napi_disable(&block->napi);
814         }
815         for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
816                 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
817                 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
818
819                 napi_disable(&block->napi);
820         }
821
822         /* Stop tx queues */
823         netif_tx_disable(priv->dev);
824
825         gve_clear_napi_enabled(priv);
826 }
827
828 static void gve_turnup(struct gve_priv *priv)
829 {
830         int idx;
831
832         /* Start the tx queues */
833         netif_tx_start_all_queues(priv->dev);
834
835         /* Enable napi and unmask interrupts for all queues */
836         for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
837                 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
838                 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
839
840                 napi_enable(&block->napi);
841                 iowrite32be(0, gve_irq_doorbell(priv, block));
842         }
843         for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
844                 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
845                 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
846
847                 napi_enable(&block->napi);
848                 iowrite32be(0, gve_irq_doorbell(priv, block));
849         }
850
851         gve_set_napi_enabled(priv);
852 }
853
854 static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
855 {
856         struct gve_priv *priv = netdev_priv(dev);
857
858         gve_schedule_reset(priv);
859         priv->tx_timeo_cnt++;
860 }
861
862 static const struct net_device_ops gve_netdev_ops = {
863         .ndo_start_xmit         =       gve_tx,
864         .ndo_open               =       gve_open,
865         .ndo_stop               =       gve_close,
866         .ndo_get_stats64        =       gve_get_stats,
867         .ndo_tx_timeout         =       gve_tx_timeout,
868 };
869
870 static void gve_handle_status(struct gve_priv *priv, u32 status)
871 {
872         if (GVE_DEVICE_STATUS_RESET_MASK & status) {
873                 dev_info(&priv->pdev->dev, "Device requested reset.\n");
874                 gve_set_do_reset(priv);
875         }
876 }
877
878 static void gve_handle_reset(struct gve_priv *priv)
879 {
880         /* A service task will be scheduled at the end of probe to catch any
881          * resets that need to happen, and we don't want to reset until
882          * probe is done.
883          */
884         if (gve_get_probe_in_progress(priv))
885                 return;
886
887         if (gve_get_do_reset(priv)) {
888                 rtnl_lock();
889                 gve_reset(priv, false);
890                 rtnl_unlock();
891         }
892 }
893
894 /* Handle NIC status register changes and reset requests */
895 static void gve_service_task(struct work_struct *work)
896 {
897         struct gve_priv *priv = container_of(work, struct gve_priv,
898                                              service_task);
899
900         gve_handle_status(priv,
901                           ioread32be(&priv->reg_bar0->device_status));
902
903         gve_handle_reset(priv);
904 }
905
906 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
907 {
908         int num_ntfy;
909         int err;
910
911         /* Set up the adminq */
912         err = gve_adminq_alloc(&priv->pdev->dev, priv);
913         if (err) {
914                 dev_err(&priv->pdev->dev,
915                         "Failed to alloc admin queue: err=%d\n", err);
916                 return err;
917         }
918
919         if (skip_describe_device)
920                 goto setup_device;
921
922         /* Get the initial information we need from the device */
923         err = gve_adminq_describe_device(priv);
924         if (err) {
925                 dev_err(&priv->pdev->dev,
926                         "Could not get device information: err=%d\n", err);
927                 goto err;
928         }
929         if (priv->dev->max_mtu > PAGE_SIZE) {
930                 priv->dev->max_mtu = PAGE_SIZE;
931                 err = gve_adminq_set_mtu(priv, priv->dev->mtu);
932                 if (err) {
933                         netif_err(priv, drv, priv->dev, "Could not set mtu");
934                         goto err;
935                 }
936         }
937         priv->dev->mtu = priv->dev->max_mtu;
938         num_ntfy = pci_msix_vec_count(priv->pdev);
939         if (num_ntfy <= 0) {
940                 dev_err(&priv->pdev->dev,
941                         "could not count MSI-x vectors: err=%d\n", num_ntfy);
942                 err = num_ntfy;
943                 goto err;
944         } else if (num_ntfy < GVE_MIN_MSIX) {
945                 dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
946                         GVE_MIN_MSIX, num_ntfy);
947                 err = -EINVAL;
948                 goto err;
949         }
950
951         priv->num_registered_pages = 0;
952         priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
953         /* gvnic has one Notification Block per MSI-x vector, except for the
954          * management vector
955          */
956         priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
957         priv->mgmt_msix_idx = priv->num_ntfy_blks;
958
959         priv->tx_cfg.max_queues =
960                 min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
961         priv->rx_cfg.max_queues =
962                 min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);
963
964         priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
965         priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
966         if (priv->default_num_queues > 0) {
967                 priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
968                                                 priv->tx_cfg.num_queues);
969                 priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
970                                                 priv->rx_cfg.num_queues);
971         }
972
973         netif_info(priv, drv, priv->dev, "TX queues %d, RX queues %d\n",
974                    priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
975         netif_info(priv, drv, priv->dev, "Max TX queues %d, Max RX queues %d\n",
976                    priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
977
978 setup_device:
979         err = gve_setup_device_resources(priv);
980         if (!err)
981                 return 0;
982 err:
983         gve_adminq_free(&priv->pdev->dev, priv);
984         return err;
985 }
986
987 static void gve_teardown_priv_resources(struct gve_priv *priv)
988 {
989         gve_teardown_device_resources(priv);
990         gve_adminq_free(&priv->pdev->dev, priv);
991 }
992
993 static void gve_trigger_reset(struct gve_priv *priv)
994 {
995         /* Reset the device by releasing the AQ */
996         gve_adminq_release(priv);
997 }
998
999 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
1000 {
1001         gve_trigger_reset(priv);
1002         /* With the reset having already happened, close cannot fail */
1003         if (was_up)
1004                 gve_close(priv->dev);
1005         gve_teardown_priv_resources(priv);
1006 }
1007
1008 static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
1009 {
1010         int err;
1011
1012         err = gve_init_priv(priv, true);
1013         if (err)
1014                 goto err;
1015         if (was_up) {
1016                 err = gve_open(priv->dev);
1017                 if (err)
1018                         goto err;
1019         }
1020         return 0;
1021 err:
1022         dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
1023         gve_turndown(priv);
1024         return err;
1025 }
1026
1027 int gve_reset(struct gve_priv *priv, bool attempt_teardown)
1028 {
1029         bool was_up = netif_carrier_ok(priv->dev);
1030         int err;
1031
1032         dev_info(&priv->pdev->dev, "Performing reset\n");
1033         gve_clear_do_reset(priv);
1034         gve_set_reset_in_progress(priv);
1035         /* If we aren't attempting to teardown normally, just go turndown and
1036          * reset right away.
1037          */
1038         if (!attempt_teardown) {
1039                 gve_turndown(priv);
1040                 gve_reset_and_teardown(priv, was_up);
1041         } else {
1042                 /* Otherwise attempt to close normally */
1043                 if (was_up) {
1044                         err = gve_close(priv->dev);
1045                         /* If that fails reset as we did above */
1046                         if (err)
1047                                 gve_reset_and_teardown(priv, was_up);
1048                 }
1049                 /* Clean up any remaining resources */
1050                 gve_teardown_priv_resources(priv);
1051         }
1052
1053         /* Set it all back up */
1054         err = gve_reset_recovery(priv, was_up);
1055         gve_clear_reset_in_progress(priv);
1056         priv->reset_cnt++;
1057         priv->interface_up_cnt = 0;
1058         priv->interface_down_cnt = 0;
1059         return err;
1060 }
1061
1062 static void gve_write_version(u8 __iomem *driver_version_register)
1063 {
1064         const char *c = gve_version_prefix;
1065
1066         while (*c) {
1067                 writeb(*c, driver_version_register);
1068                 c++;
1069         }
1070
1071         c = gve_version_str;
1072         while (*c) {
1073                 writeb(*c, driver_version_register);
1074                 c++;
1075         }
1076         writeb('\n', driver_version_register);
1077 }
1078
1079 static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1080 {
1081         int max_tx_queues, max_rx_queues;
1082         struct net_device *dev;
1083         __be32 __iomem *db_bar;
1084         struct gve_registers __iomem *reg_bar;
1085         struct gve_priv *priv;
1086         int err;
1087
1088         err = pci_enable_device(pdev);
1089         if (err)
1090                 return -ENXIO;
1091
1092         err = pci_request_regions(pdev, "gvnic-cfg");
1093         if (err)
1094                 goto abort_with_enabled;
1095
1096         pci_set_master(pdev);
1097
1098         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1099         if (err) {
1100                 dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
1101                 goto abort_with_pci_region;
1102         }
1103
1104         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1105         if (err) {
1106                 dev_err(&pdev->dev,
1107                         "Failed to set consistent dma mask: err=%d\n", err);
1108                 goto abort_with_pci_region;
1109         }
1110
1111         reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
1112         if (!reg_bar) {
1113                 dev_err(&pdev->dev, "Failed to map pci bar!\n");
1114                 err = -ENOMEM;
1115                 goto abort_with_pci_region;
1116         }
1117
1118         db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
1119         if (!db_bar) {
1120                 dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
1121                 err = -ENOMEM;
1122                 goto abort_with_reg_bar;
1123         }
1124
1125         gve_write_version(&reg_bar->driver_version);
1126         /* Get max queues to alloc etherdev */
1127         max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
1128         max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
1129         /* Alloc and setup the netdev and priv */
1130         dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
1131         if (!dev) {
1132                 dev_err(&pdev->dev, "could not allocate netdev\n");
1133                 goto abort_with_db_bar;
1134         }
1135         SET_NETDEV_DEV(dev, &pdev->dev);
1136         pci_set_drvdata(pdev, dev);
1137         dev->ethtool_ops = &gve_ethtool_ops;
1138         dev->netdev_ops = &gve_netdev_ops;
1139         /* advertise features */
1140         dev->hw_features = NETIF_F_HIGHDMA;
1141         dev->hw_features |= NETIF_F_SG;
1142         dev->hw_features |= NETIF_F_HW_CSUM;
1143         dev->hw_features |= NETIF_F_TSO;
1144         dev->hw_features |= NETIF_F_TSO6;
1145         dev->hw_features |= NETIF_F_TSO_ECN;
1146         dev->hw_features |= NETIF_F_RXCSUM;
1147         dev->hw_features |= NETIF_F_RXHASH;
1148         dev->features = dev->hw_features;
1149         dev->watchdog_timeo = 5 * HZ;
1150         dev->min_mtu = ETH_MIN_MTU;
1151         netif_carrier_off(dev);
1152
1153         priv = netdev_priv(dev);
1154         priv->dev = dev;
1155         priv->pdev = pdev;
1156         priv->msg_enable = DEFAULT_MSG_LEVEL;
1157         priv->reg_bar0 = reg_bar;
1158         priv->db_bar2 = db_bar;
1159         priv->service_task_flags = 0x0;
1160         priv->state_flags = 0x0;
1161
1162         gve_set_probe_in_progress(priv);
1163         priv->gve_wq = alloc_ordered_workqueue("gve", 0);
1164         if (!priv->gve_wq) {
1165                 dev_err(&pdev->dev, "Could not allocate workqueue");
1166                 err = -ENOMEM;
1167                 goto abort_with_netdev;
1168         }
1169         INIT_WORK(&priv->service_task, gve_service_task);
1170         priv->tx_cfg.max_queues = max_tx_queues;
1171         priv->rx_cfg.max_queues = max_rx_queues;
1172
1173         err = gve_init_priv(priv, false);
1174         if (err)
1175                 goto abort_with_wq;
1176
1177         err = register_netdev(dev);
1178         if (err)
1179                 goto abort_with_wq;
1180
1181         dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
1182         gve_clear_probe_in_progress(priv);
1183         queue_work(priv->gve_wq, &priv->service_task);
1184         return 0;
1185
1186 abort_with_wq:
1187         destroy_workqueue(priv->gve_wq);
1188
1189 abort_with_netdev:
1190         free_netdev(dev);
1191
1192 abort_with_db_bar:
1193         pci_iounmap(pdev, db_bar);
1194
1195 abort_with_reg_bar:
1196         pci_iounmap(pdev, reg_bar);
1197
1198 abort_with_pci_region:
1199         pci_release_regions(pdev);
1200
1201 abort_with_enabled:
1202         pci_disable_device(pdev);
1203         return -ENXIO;
1204 }
1205
1206 static void gve_remove(struct pci_dev *pdev)
1207 {
1208         struct net_device *netdev = pci_get_drvdata(pdev);
1209         struct gve_priv *priv = netdev_priv(netdev);
1210         __be32 __iomem *db_bar = priv->db_bar2;
1211         void __iomem *reg_bar = priv->reg_bar0;
1212
1213         unregister_netdev(netdev);
1214         gve_teardown_priv_resources(priv);
1215         destroy_workqueue(priv->gve_wq);
1216         free_netdev(netdev);
1217         pci_iounmap(pdev, db_bar);
1218         pci_iounmap(pdev, reg_bar);
1219         pci_release_regions(pdev);
1220         pci_disable_device(pdev);
1221 }
1222
1223 static const struct pci_device_id gve_id_table[] = {
1224         { PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
1225         { }
1226 };
1227
1228 static struct pci_driver gvnic_driver = {
1229         .name           = "gvnic",
1230         .id_table       = gve_id_table,
1231         .probe          = gve_probe,
1232         .remove         = gve_remove,
1233 };
1234
1235 module_pci_driver(gvnic_driver);
1236
1237 MODULE_DEVICE_TABLE(pci, gve_id_table);
1238 MODULE_AUTHOR("Google, Inc.");
1239 MODULE_DESCRIPTION("gVNIC Driver");
1240 MODULE_LICENSE("Dual MIT/GPL");
1241 MODULE_VERSION(GVE_VERSION);