// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include "gve.h"
#include "gve_dqo.h"
#include "gve_adminq.h"
#include "gve_register.h"

#define GVE_DEFAULT_RX_COPYBREAK        (256)

#define DEFAULT_MSG_LEVEL       (NETIF_MSG_DRV | NETIF_MSG_LINK)
#define GVE_VERSION             "1.0.0"
#define GVE_VERSION_PREFIX      "GVE-"

// Minimum amount of time between queue kicks in msec (10 seconds)
#define MIN_TX_TIMEOUT_GAP (1000 * 10)

const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;

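/* Dispatch a transmit to the GQI or DQO datapath based on the queue format
 * negotiated with the device.
 */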
static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct gve_priv *priv = netdev_priv(dev);

        if (gve_is_gqi(priv))
                return gve_tx(skb, dev);
        else
                return gve_tx_dqo(skb, dev);
}

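/* Aggregate per-ring packet and byte counters into rtnl_link_stats64. Each
 * ring's counters are read under its u64_stats seqcount so a consistent
 * snapshot is reported.
 */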
static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
{
        struct gve_priv *priv = netdev_priv(dev);
        unsigned int start;
        u64 packets, bytes;
        int ring;

        if (priv->rx) {
                for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
                        do {
                                start =
                                  u64_stats_fetch_begin(&priv->rx[ring].statss);
                                packets = priv->rx[ring].rpackets;
                                bytes = priv->rx[ring].rbytes;
                        } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
                                                       start));
                        s->rx_packets += packets;
                        s->rx_bytes += bytes;
                }
        }
        if (priv->tx) {
                for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
                        do {
                                start =
                                  u64_stats_fetch_begin(&priv->tx[ring].statss);
                                packets = priv->tx[ring].pkt_done;
                                bytes = priv->tx[ring].bytes_done;
                        } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
                                                       start));
                        s->tx_packets += packets;
                        s->tx_bytes += bytes;
                }
        }
}

static int gve_alloc_counter_array(struct gve_priv *priv)
{
        priv->counter_array =
                dma_alloc_coherent(&priv->pdev->dev,
                                   priv->num_event_counters *
                                   sizeof(*priv->counter_array),
                                   &priv->counter_array_bus, GFP_KERNEL);
        if (!priv->counter_array)
                return -ENOMEM;

        return 0;
}

static void gve_free_counter_array(struct gve_priv *priv)
{
        if (!priv->counter_array)
                return;

        dma_free_coherent(&priv->pdev->dev,
                          priv->num_event_counters *
                          sizeof(*priv->counter_array),
                          priv->counter_array, priv->counter_array_bus);
        priv->counter_array = NULL;
}

/* NIC requests to report stats */
static void gve_stats_report_task(struct work_struct *work)
{
        struct gve_priv *priv = container_of(work, struct gve_priv,
                                             stats_report_task);
        if (gve_get_do_report_stats(priv)) {
                gve_handle_report_stats(priv);
                gve_clear_do_report_stats(priv);
        }
}

static void gve_stats_report_schedule(struct gve_priv *priv)
{
        if (!gve_get_probe_in_progress(priv) &&
            !gve_get_reset_in_progress(priv)) {
                gve_set_do_report_stats(priv);
                queue_work(priv->gve_wq, &priv->stats_report_task);
        }
}

static void gve_stats_report_timer(struct timer_list *t)
{
        struct gve_priv *priv = from_timer(priv, t, stats_report_timer);

        mod_timer(&priv->stats_report_timer,
                  round_jiffies(jiffies +
                  msecs_to_jiffies(priv->stats_report_timer_period)));
        gve_stats_report_schedule(priv);
}

static int gve_alloc_stats_report(struct gve_priv *priv)
{
        int tx_stats_num, rx_stats_num;

        tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
                       priv->tx_cfg.num_queues;
        rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
                       priv->rx_cfg.num_queues;
        priv->stats_report_len = struct_size(priv->stats_report, stats,
                                             tx_stats_num + rx_stats_num);
        priv->stats_report =
                dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
                                   &priv->stats_report_bus, GFP_KERNEL);
        if (!priv->stats_report)
                return -ENOMEM;
        /* Set up timer for the report-stats task */
        timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
        priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
        return 0;
}

static void gve_free_stats_report(struct gve_priv *priv)
{
        if (!priv->stats_report)
                return;

        del_timer_sync(&priv->stats_report_timer);
        dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
                          priv->stats_report, priv->stats_report_bus);
        priv->stats_report = NULL;
}

static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
{
        struct gve_priv *priv = arg;

        queue_work(priv->gve_wq, &priv->service_task);
        return IRQ_HANDLED;
}

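/* Per-queue interrupt handler for GQI: mask the vector via its IRQ doorbell
 * and defer the actual TX/RX work to NAPI.
 */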
static irqreturn_t gve_intr(int irq, void *arg)
{
        struct gve_notify_block *block = arg;
        struct gve_priv *priv = block->priv;

        iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
        napi_schedule_irqoff(&block->napi);
        return IRQ_HANDLED;
}

static irqreturn_t gve_intr_dqo(int irq, void *arg)
{
        struct gve_notify_block *block = arg;

        /* Interrupts are automatically masked */
        napi_schedule_irqoff(&block->napi);
        return IRQ_HANDLED;
}

static int gve_napi_poll(struct napi_struct *napi, int budget)
{
        struct gve_notify_block *block;
        __be32 __iomem *irq_doorbell;
        bool reschedule = false;
        struct gve_priv *priv;
        int work_done = 0;

        block = container_of(napi, struct gve_notify_block, napi);
        priv = block->priv;

        if (block->tx)
                reschedule |= gve_tx_poll(block, budget);
        if (block->rx) {
                work_done = gve_rx_poll(block, budget);
                reschedule |= work_done == budget;
        }

        if (reschedule)
                return budget;

        /* Complete processing - don't unmask irq if busy polling is enabled */
        if (likely(napi_complete_done(napi, work_done))) {
                irq_doorbell = gve_irq_doorbell(priv, block);
                iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);

                /* Ensure the IRQ ACK is visible before we check for pending
                 * work. If the queue has issued updates, they will now be
                 * visible.
                 */
                mb();

                if (block->tx)
                        reschedule |= gve_tx_clean_pending(priv, block->tx);
                if (block->rx)
                        reschedule |= gve_rx_work_pending(block->rx);

                if (reschedule && napi_reschedule(napi))
                        iowrite32be(GVE_IRQ_MASK, irq_doorbell);
        }
        return work_done;
}

static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
{
        struct gve_notify_block *block =
                container_of(napi, struct gve_notify_block, napi);
        struct gve_priv *priv = block->priv;
        bool reschedule = false;
        int work_done = 0;

        /* Clear PCI MSI-X Pending Bit Array (PBA)
         *
         * This bit is set if an interrupt event occurs while the vector is
         * masked. If this bit is set and we reenable the interrupt, it will
         * fire again. Since we're just about to poll the queue state, we don't
         * need it to fire again.
         *
         * Under high softirq load, it's possible that the interrupt condition
         * is triggered twice before we got the chance to process it.
         */
        gve_write_irq_doorbell_dqo(priv, block,
                                   GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);

        if (block->tx)
                reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);

        if (block->rx) {
                work_done = gve_rx_poll_dqo(block, budget);
                reschedule |= work_done == budget;
        }

        if (reschedule)
                return budget;

        if (likely(napi_complete_done(napi, work_done))) {
                /* Enable interrupts again.
                 *
                 * We don't need to repoll afterwards because HW supports the
                 * PCI MSI-X PBA feature.
                 *
                 * Another interrupt would be triggered if a new event came in
                 * since the last one.
                 */
                gve_write_irq_doorbell_dqo(priv, block,
                                           GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
        }

        return work_done;
}

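/* Allocate MSI-X vectors (one per notification block plus one management
 * vector), request their IRQs, spread the per-queue vectors across online
 * CPUs via affinity hints, and allocate the DMA'd array of IRQ doorbell
 * indices. If fewer vectors than requested are granted, the TX/RX queue
 * limits are scaled down to fit.
 */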
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
        int num_vecs_requested = priv->num_ntfy_blks + 1;
        char *name = priv->dev->name;
        unsigned int active_cpus;
        int vecs_enabled;
        int i, j;
        int err;

        priv->msix_vectors = kvcalloc(num_vecs_requested,
                                      sizeof(*priv->msix_vectors), GFP_KERNEL);
        if (!priv->msix_vectors)
                return -ENOMEM;
        for (i = 0; i < num_vecs_requested; i++)
                priv->msix_vectors[i].entry = i;
        vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
                                             GVE_MIN_MSIX, num_vecs_requested);
        if (vecs_enabled < 0) {
                dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
                        GVE_MIN_MSIX, vecs_enabled);
                err = vecs_enabled;
                goto abort_with_msix_vectors;
        }
        if (vecs_enabled != num_vecs_requested) {
                int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
                int vecs_per_type = new_num_ntfy_blks / 2;
                int vecs_left = new_num_ntfy_blks % 2;

                priv->num_ntfy_blks = new_num_ntfy_blks;
                priv->mgmt_msix_idx = priv->num_ntfy_blks;
                priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
                                                vecs_per_type);
                priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
                                                vecs_per_type + vecs_left);
                dev_err(&priv->pdev->dev,
                        "Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
                        vecs_enabled, priv->tx_cfg.max_queues,
                        priv->rx_cfg.max_queues);
                if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
                        priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
                if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
                        priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
        }
        /* Half the notification blocks go to TX and half to RX */
        active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());

        /* Setup Management Vector - the last vector */
        snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
                 name);
        err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
                          gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
        if (err) {
                dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
                goto abort_with_msix_enabled;
        }
        priv->irq_db_indices =
                dma_alloc_coherent(&priv->pdev->dev,
                                   priv->num_ntfy_blks *
                                   sizeof(*priv->irq_db_indices),
                                   &priv->irq_db_indices_bus, GFP_KERNEL);
        if (!priv->irq_db_indices) {
                err = -ENOMEM;
                goto abort_with_mgmt_vector;
        }

        priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks *
                                     sizeof(*priv->ntfy_blocks), GFP_KERNEL);
        if (!priv->ntfy_blocks) {
                err = -ENOMEM;
                goto abort_with_irq_db_indices;
        }

        /* Setup the other blocks - the first n-1 vectors */
        for (i = 0; i < priv->num_ntfy_blks; i++) {
                struct gve_notify_block *block = &priv->ntfy_blocks[i];
                int msix_idx = i;

                snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
                         name, i);
                block->priv = priv;
                err = request_irq(priv->msix_vectors[msix_idx].vector,
                                  gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
                                  0, block->name, block);
                if (err) {
                        dev_err(&priv->pdev->dev,
                                "Failed to receive msix vector %d\n", i);
                        goto abort_with_some_ntfy_blocks;
                }
                irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
                                      get_cpu_mask(i % active_cpus));
                block->irq_db_index = &priv->irq_db_indices[i].index;
        }
        return 0;
abort_with_some_ntfy_blocks:
        for (j = 0; j < i; j++) {
                struct gve_notify_block *block = &priv->ntfy_blocks[j];
                int msix_idx = j;

                irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
                                      NULL);
                free_irq(priv->msix_vectors[msix_idx].vector, block);
        }
        kvfree(priv->ntfy_blocks);
        priv->ntfy_blocks = NULL;
abort_with_irq_db_indices:
        dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
                          sizeof(*priv->irq_db_indices),
                          priv->irq_db_indices, priv->irq_db_indices_bus);
        priv->irq_db_indices = NULL;
abort_with_mgmt_vector:
        free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
abort_with_msix_enabled:
        pci_disable_msix(priv->pdev);
abort_with_msix_vectors:
        kvfree(priv->msix_vectors);
        priv->msix_vectors = NULL;
        return err;
}

static void gve_free_notify_blocks(struct gve_priv *priv)
{
        int i;

        if (!priv->msix_vectors)
                return;

        /* Free the irqs */
        for (i = 0; i < priv->num_ntfy_blks; i++) {
                struct gve_notify_block *block = &priv->ntfy_blocks[i];
                int msix_idx = i;

                irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
                                      NULL);
                free_irq(priv->msix_vectors[msix_idx].vector, block);
        }
        free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
        kvfree(priv->ntfy_blocks);
        priv->ntfy_blocks = NULL;
        dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
                          sizeof(*priv->irq_db_indices),
                          priv->irq_db_indices, priv->irq_db_indices_bus);
        priv->irq_db_indices = NULL;
        pci_disable_msix(priv->pdev);
        kvfree(priv->msix_vectors);
        priv->msix_vectors = NULL;
}

static int gve_setup_device_resources(struct gve_priv *priv)
{
        int err;

        err = gve_alloc_counter_array(priv);
        if (err)
                return err;
        err = gve_alloc_notify_blocks(priv);
        if (err)
                goto abort_with_counter;
        err = gve_alloc_stats_report(priv);
        if (err)
                goto abort_with_ntfy_blocks;
        err = gve_adminq_configure_device_resources(priv,
                                                    priv->counter_array_bus,
                                                    priv->num_event_counters,
                                                    priv->irq_db_indices_bus,
                                                    priv->num_ntfy_blks);
        if (unlikely(err)) {
                dev_err(&priv->pdev->dev,
                        "could not setup device_resources: err=%d\n", err);
                err = -ENXIO;
                goto abort_with_stats_report;
        }

        if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
                priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
                                               GFP_KERNEL);
                if (!priv->ptype_lut_dqo) {
                        err = -ENOMEM;
                        goto abort_with_stats_report;
                }
                err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
                if (err) {
                        dev_err(&priv->pdev->dev,
                                "Failed to get ptype map: err=%d\n", err);
                        goto abort_with_ptype_lut;
                }
        }

        err = gve_adminq_report_stats(priv, priv->stats_report_len,
                                      priv->stats_report_bus,
                                      GVE_STATS_REPORT_TIMER_PERIOD);
        if (err)
                dev_err(&priv->pdev->dev,
                        "Failed to report stats: err=%d\n", err);
        gve_set_device_resources_ok(priv);
        return 0;

abort_with_ptype_lut:
        kvfree(priv->ptype_lut_dqo);
        priv->ptype_lut_dqo = NULL;
abort_with_stats_report:
        gve_free_stats_report(priv);
abort_with_ntfy_blocks:
        gve_free_notify_blocks(priv);
abort_with_counter:
        gve_free_counter_array(priv);

        return err;
}

static void gve_trigger_reset(struct gve_priv *priv);

static void gve_teardown_device_resources(struct gve_priv *priv)
{
        int err;

        /* Tell device its resources are being freed */
        if (gve_get_device_resources_ok(priv)) {
                /* detach the stats report */
                err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
                if (err) {
                        dev_err(&priv->pdev->dev,
                                "Failed to detach stats report: err=%d\n", err);
                        gve_trigger_reset(priv);
                }
                err = gve_adminq_deconfigure_device_resources(priv);
                if (err) {
                        dev_err(&priv->pdev->dev,
                                "Could not deconfigure device resources: err=%d\n",
                                err);
                        gve_trigger_reset(priv);
                }
        }

        kvfree(priv->ptype_lut_dqo);
        priv->ptype_lut_dqo = NULL;

        gve_free_counter_array(priv);
        gve_free_notify_blocks(priv);
        gve_free_stats_report(priv);
        gve_clear_device_resources_ok(priv);
}

static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
                         int (*gve_poll)(struct napi_struct *, int))
{
        struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

        netif_napi_add(priv->dev, &block->napi, gve_poll,
                       NAPI_POLL_WEIGHT);
}

static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
{
        struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

        netif_napi_del(&block->napi);
}

static int gve_register_qpls(struct gve_priv *priv)
{
        int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
        int err;
        int i;

        for (i = 0; i < num_qpls; i++) {
                err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
                if (err) {
                        netif_err(priv, drv, priv->dev,
                                  "failed to register queue page list %d\n",
                                  priv->qpls[i].id);
                        /* This failure will trigger a reset - no need to clean
                         * up
                         */
                        return err;
                }
        }
        return 0;
}

static int gve_unregister_qpls(struct gve_priv *priv)
{
        int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
        int err;
        int i;

        for (i = 0; i < num_qpls; i++) {
                err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
                /* This failure will trigger a reset - no need to clean up */
                if (err) {
                        netif_err(priv, drv, priv->dev,
                                  "Failed to unregister queue page list %d\n",
                                  priv->qpls[i].id);
                        return err;
                }
        }
        return 0;
}

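/* Ask the device (over the admin queue) to create the configured TX and RX
 * queues, then make the initial RX buffers available to the NIC: a doorbell
 * write per ring for GQI, or explicit buffer posting for DQO.
 */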
static int gve_create_rings(struct gve_priv *priv)
{
        int err;
        int i;

        err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
        if (err) {
                netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
                          priv->tx_cfg.num_queues);
                /* This failure will trigger a reset - no need to clean
                 * up
                 */
                return err;
        }
        netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
                  priv->tx_cfg.num_queues);

        err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
        if (err) {
                netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
                          priv->rx_cfg.num_queues);
                /* This failure will trigger a reset - no need to clean
                 * up
                 */
                return err;
        }
        netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
                  priv->rx_cfg.num_queues);

        if (gve_is_gqi(priv)) {
                /* Rx data ring has been prefilled with packet buffers at queue
                 * allocation time.
                 *
                 * Write the doorbell to provide descriptor slots and packet
                 * buffers to the NIC.
                 */
                for (i = 0; i < priv->rx_cfg.num_queues; i++)
                        gve_rx_write_doorbell(priv, &priv->rx[i]);
        } else {
                for (i = 0; i < priv->rx_cfg.num_queues; i++) {
                        /* Post buffers and ring doorbell. */
                        gve_rx_post_buffers_dqo(&priv->rx[i]);
                }
        }

        return 0;
}

static void add_napi_init_sync_stats(struct gve_priv *priv,
                                     int (*napi_poll)(struct napi_struct *napi,
                                                      int budget))
{
        int i;

        /* Add tx napi & init sync stats */
        for (i = 0; i < priv->tx_cfg.num_queues; i++) {
                int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);

                u64_stats_init(&priv->tx[i].statss);
                priv->tx[i].ntfy_id = ntfy_idx;
                gve_add_napi(priv, ntfy_idx, napi_poll);
        }
        /* Add rx napi & init sync stats */
        for (i = 0; i < priv->rx_cfg.num_queues; i++) {
                int ntfy_idx = gve_rx_idx_to_ntfy(priv, i);

                u64_stats_init(&priv->rx[i].statss);
                priv->rx[i].ntfy_id = ntfy_idx;
                gve_add_napi(priv, ntfy_idx, napi_poll);
        }
}

static void gve_tx_free_rings(struct gve_priv *priv)
{
        if (gve_is_gqi(priv)) {
                gve_tx_free_rings_gqi(priv);
        } else {
                gve_tx_free_rings_dqo(priv);
        }
}

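/* Allocate the TX and RX ring arrays for the current queue configuration,
 * allocate the per-ring resources for the active queue format, and register
 * a NAPI poll routine on each ring's notification block.
 */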
static int gve_alloc_rings(struct gve_priv *priv)
{
        int err;

        /* Setup tx rings */
        priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx),
                            GFP_KERNEL);
        if (!priv->tx)
                return -ENOMEM;

        if (gve_is_gqi(priv))
                err = gve_tx_alloc_rings(priv);
        else
                err = gve_tx_alloc_rings_dqo(priv);
        if (err)
                goto free_tx;

        /* Setup rx rings */
        priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx),
                            GFP_KERNEL);
        if (!priv->rx) {
                err = -ENOMEM;
                goto free_tx_queue;
        }

        if (gve_is_gqi(priv))
                err = gve_rx_alloc_rings(priv);
        else
                err = gve_rx_alloc_rings_dqo(priv);
        if (err)
                goto free_rx;

        if (gve_is_gqi(priv))
                add_napi_init_sync_stats(priv, gve_napi_poll);
        else
                add_napi_init_sync_stats(priv, gve_napi_poll_dqo);

        return 0;

free_rx:
        kvfree(priv->rx);
        priv->rx = NULL;
free_tx_queue:
        gve_tx_free_rings(priv);
free_tx:
        kvfree(priv->tx);
        priv->tx = NULL;
        return err;
}

static int gve_destroy_rings(struct gve_priv *priv)
{
        int err;

        err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
        if (err) {
                netif_err(priv, drv, priv->dev,
                          "failed to destroy tx queues\n");
                /* This failure will trigger a reset - no need to clean up */
                return err;
        }
        netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
        err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
        if (err) {
                netif_err(priv, drv, priv->dev,
                          "failed to destroy rx queues\n");
                /* This failure will trigger a reset - no need to clean up */
                return err;
        }
        netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
        return 0;
}

static void gve_rx_free_rings(struct gve_priv *priv)
{
        if (gve_is_gqi(priv))
                gve_rx_free_rings_gqi(priv);
        else
                gve_rx_free_rings_dqo(priv);
}

static void gve_free_rings(struct gve_priv *priv)
{
        int ntfy_idx;
        int i;

        if (priv->tx) {
                for (i = 0; i < priv->tx_cfg.num_queues; i++) {
                        ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
                        gve_remove_napi(priv, ntfy_idx);
                }
                gve_tx_free_rings(priv);
                kvfree(priv->tx);
                priv->tx = NULL;
        }
        if (priv->rx) {
                for (i = 0; i < priv->rx_cfg.num_queues; i++) {
                        ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
                        gve_remove_napi(priv, ntfy_idx);
                }
                gve_rx_free_rings(priv);
                kvfree(priv->rx);
                priv->rx = NULL;
        }
}

int gve_alloc_page(struct gve_priv *priv, struct device *dev,
                   struct page **page, dma_addr_t *dma,
                   enum dma_data_direction dir)
{
        *page = alloc_page(GFP_KERNEL);
        if (!*page) {
                priv->page_alloc_fail++;
                return -ENOMEM;
        }
        *dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
        if (dma_mapping_error(dev, *dma)) {
                priv->dma_mapping_error++;
                put_page(*page);
                return -ENOMEM;
        }
        return 0;
}

static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
                                     int pages)
{
        struct gve_queue_page_list *qpl = &priv->qpls[id];
        int err;
        int i;

        if (pages + priv->num_registered_pages > priv->max_registered_pages) {
                netif_err(priv, drv, priv->dev,
                          "Reached max number of registered pages %llu > %llu\n",
                          pages + priv->num_registered_pages,
                          priv->max_registered_pages);
                return -EINVAL;
        }

        qpl->id = id;
        qpl->num_entries = 0;
        qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
        /* caller handles clean up */
        if (!qpl->pages)
                return -ENOMEM;
        qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
        /* caller handles clean up */
        if (!qpl->page_buses)
                return -ENOMEM;

        for (i = 0; i < pages; i++) {
                err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
                                     &qpl->page_buses[i],
                                     gve_qpl_dma_dir(priv, id));
                /* caller handles clean up */
                if (err)
                        return -ENOMEM;
                qpl->num_entries++;
        }
        priv->num_registered_pages += pages;

        return 0;
}

void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
                   enum dma_data_direction dir)
{
        if (!dma_mapping_error(dev, dma))
                dma_unmap_page(dev, dma, PAGE_SIZE, dir);
        if (page)
                put_page(page);
}

static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
{
        struct gve_queue_page_list *qpl = &priv->qpls[id];
        int i;

        if (!qpl->pages)
                return;
        if (!qpl->page_buses)
                goto free_pages;

        for (i = 0; i < qpl->num_entries; i++)
                gve_free_page(&priv->pdev->dev, qpl->pages[i],
                              qpl->page_buses[i], gve_qpl_dma_dir(priv, id));

        kvfree(qpl->page_buses);
free_pages:
        kvfree(qpl->pages);
        priv->num_registered_pages -= qpl->num_entries;
}

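/* Queue page lists (QPLs) are driver-allocated, DMA-mapped page sets that are
 * registered with the device so it can reference packet buffers within them.
 * One QPL is allocated per TX queue and per RX queue; raw-addressing (RDA)
 * queue formats do not use QPLs.
 */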
static int gve_alloc_qpls(struct gve_priv *priv)
{
        int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
        int i, j;
        int err;

        /* Raw addressing means no QPLs */
        if (priv->queue_format == GVE_GQI_RDA_FORMAT)
                return 0;

        priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
        if (!priv->qpls)
                return -ENOMEM;

        for (i = 0; i < gve_num_tx_qpls(priv); i++) {
                err = gve_alloc_queue_page_list(priv, i,
                                                priv->tx_pages_per_qpl);
                if (err)
                        goto free_qpls;
        }
        for (; i < num_qpls; i++) {
                err = gve_alloc_queue_page_list(priv, i,
                                                priv->rx_data_slot_cnt);
                if (err)
                        goto free_qpls;
        }

        priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
                                     sizeof(unsigned long) * BITS_PER_BYTE;
        priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls),
                                            sizeof(unsigned long), GFP_KERNEL);
        if (!priv->qpl_cfg.qpl_id_map) {
                err = -ENOMEM;
                goto free_qpls;
        }

        return 0;

free_qpls:
        for (j = 0; j <= i; j++)
                gve_free_queue_page_list(priv, j);
        kvfree(priv->qpls);
        return err;
}

static void gve_free_qpls(struct gve_priv *priv)
{
        int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
        int i;

        /* Raw addressing means no QPLs */
        if (priv->queue_format == GVE_GQI_RDA_FORMAT)
                return;

        kvfree(priv->qpl_cfg.qpl_id_map);

        for (i = 0; i < num_qpls; i++)
                gve_free_queue_page_list(priv, i);

        kvfree(priv->qpls);
}

/* Use this to schedule a reset when the device is capable of continuing
 * to handle other requests in its current state. If it is not, do a reset
 * in thread instead.
 */
void gve_schedule_reset(struct gve_priv *priv)
{
        gve_set_do_reset(priv);
        queue_work(priv->gve_wq, &priv->service_task);
}

static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
static void gve_turndown(struct gve_priv *priv);
static void gve_turnup(struct gve_priv *priv);

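/* ndo_open: allocate QPLs and rings, size the real TX/RX queue counts,
 * register the page lists and create the queues on the device, then bring
 * the data path up. Failures after the device has been touched fall through
 * to a reset unless one is already in progress.
 */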
static int gve_open(struct net_device *dev)
{
        struct gve_priv *priv = netdev_priv(dev);
        int err;

        err = gve_alloc_qpls(priv);
        if (err)
                return err;

        err = gve_alloc_rings(priv);
        if (err)
                goto free_qpls;

        err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
        if (err)
                goto free_rings;
        err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
        if (err)
                goto free_rings;

        err = gve_register_qpls(priv);
        if (err)
                goto reset;

        if (!gve_is_gqi(priv)) {
                /* Hard code this for now. This may be tuned in the future for
                 * performance.
                 */
                priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
        }
        err = gve_create_rings(priv);
        if (err)
                goto reset;

        gve_set_device_rings_ok(priv);

        if (gve_get_report_stats(priv))
                mod_timer(&priv->stats_report_timer,
                          round_jiffies(jiffies +
                                msecs_to_jiffies(priv->stats_report_timer_period)));

        gve_turnup(priv);
        queue_work(priv->gve_wq, &priv->service_task);
        priv->interface_up_cnt++;
        return 0;

free_rings:
        gve_free_rings(priv);
free_qpls:
        gve_free_qpls(priv);
        return err;

reset:
        /* This must have been called from a reset due to the rtnl lock
         * so just return at this point.
         */
        if (gve_get_reset_in_progress(priv))
                return err;
        /* Otherwise reset before returning */
        gve_reset_and_teardown(priv, true);
        /* if this fails there is nothing we can do so just ignore the return */
        gve_reset_recovery(priv, false);
        /* return the original error */
        return err;
}

static int gve_close(struct net_device *dev)
{
        struct gve_priv *priv = netdev_priv(dev);
        int err;

        netif_carrier_off(dev);
        if (gve_get_device_rings_ok(priv)) {
                gve_turndown(priv);
                err = gve_destroy_rings(priv);
                if (err)
                        goto err;
                err = gve_unregister_qpls(priv);
                if (err)
                        goto err;
                gve_clear_device_rings_ok(priv);
        }
        del_timer_sync(&priv->stats_report_timer);

        gve_free_rings(priv);
        gve_free_qpls(priv);
        priv->interface_down_cnt++;
        return 0;

err:
        /* This must have been called from a reset due to the rtnl lock
         * so just return at this point.
         */
        if (gve_get_reset_in_progress(priv))
                return err;
        /* Otherwise reset before returning */
        gve_reset_and_teardown(priv, true);
        return gve_reset_recovery(priv, false);
}

int gve_adjust_queues(struct gve_priv *priv,
                      struct gve_queue_config new_rx_config,
                      struct gve_queue_config new_tx_config)
{
        int err;

        if (netif_carrier_ok(priv->dev)) {
                /* To make this process as simple as possible we teardown the
                 * device, set the new configuration, and then bring the device
                 * up again.
                 */
                err = gve_close(priv->dev);
                /* we have already tried to reset in close,
                 * just fail at this point
                 */
                if (err)
                        return err;
                priv->tx_cfg = new_tx_config;
                priv->rx_cfg = new_rx_config;

                err = gve_open(priv->dev);
                if (err)
                        goto err;

                return 0;
        }
        /* Set the config for the next up. */
        priv->tx_cfg = new_tx_config;
        priv->rx_cfg = new_rx_config;

        return 0;
err:
        netif_err(priv, drv, priv->dev,
                  "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
        gve_turndown(priv);
        return err;
}

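/* Quiesce the data path: mark the carrier down, disable NAPI on every TX and
 * RX notification block, and stop the TX queues so no new work arrives.
 */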
static void gve_turndown(struct gve_priv *priv)
{
        int idx;

        if (netif_carrier_ok(priv->dev))
                netif_carrier_off(priv->dev);

        if (!gve_get_napi_enabled(priv))
                return;

        /* Disable napi to prevent more work from coming in */
        for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
                int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
                struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

                napi_disable(&block->napi);
        }
        for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
                int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
                struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

                napi_disable(&block->napi);
        }

        /* Stop tx queues */
        netif_tx_disable(priv->dev);

        gve_clear_napi_enabled(priv);
        gve_clear_report_stats(priv);
}

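/* Restart the data path: start the TX queues, re-enable NAPI on every
 * notification block, and re-arm interrupts (a zero doorbell write for GQI,
 * or reprogramming the ITR interval for DQO).
 */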
static void gve_turnup(struct gve_priv *priv)
{
        int idx;

        /* Start the tx queues */
        netif_tx_start_all_queues(priv->dev);

        /* Enable napi and unmask interrupts for all queues */
        for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
                int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
                struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

                napi_enable(&block->napi);
                if (gve_is_gqi(priv)) {
                        iowrite32be(0, gve_irq_doorbell(priv, block));
                } else {
                        gve_set_itr_coalesce_usecs_dqo(priv, block,
                                                       priv->tx_coalesce_usecs);
                }
        }
        for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
                int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
                struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

                napi_enable(&block->napi);
                if (gve_is_gqi(priv)) {
                        iowrite32be(0, gve_irq_doorbell(priv, block));
                } else {
                        gve_set_itr_coalesce_usecs_dqo(priv, block,
                                                       priv->rx_coalesce_usecs);
                }
        }

        gve_set_napi_enabled(priv);
}

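/* ndo_tx_timeout: if enough time has passed since the last kick and the NIC
 * reports completions the driver has not yet processed, just mask the IRQ
 * and reschedule NAPI to kick the queue; otherwise schedule a device reset.
 */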
static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        struct gve_notify_block *block;
        struct gve_tx_ring *tx = NULL;
        struct gve_priv *priv;
        u32 last_nic_done;
        u32 current_time;
        u32 ntfy_idx;

        netdev_info(dev, "Timeout on tx queue, %d", txqueue);
        priv = netdev_priv(dev);
        if (txqueue > priv->tx_cfg.num_queues)
                goto reset;

        ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
        if (ntfy_idx >= priv->num_ntfy_blks)
                goto reset;

        block = &priv->ntfy_blocks[ntfy_idx];
        tx = block->tx;

        current_time = jiffies_to_msecs(jiffies);
        if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
                goto reset;

        /* Check to see if there are missed completions, which will allow us to
         * kick the queue.
         */
        last_nic_done = gve_tx_load_event_counter(priv, tx);
        if (last_nic_done - tx->done) {
                netdev_info(dev, "Kicking queue %d", txqueue);
                iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
                napi_schedule(&block->napi);
                tx->last_kick_msec = current_time;
                goto out;
        } // Else reset.

reset:
        gve_schedule_reset(priv);

out:
        if (tx)
                tx->queue_timeout++;
        priv->tx_timeo_cnt++;
}

static int gve_set_features(struct net_device *netdev,
                            netdev_features_t features)
{
        const netdev_features_t orig_features = netdev->features;
        struct gve_priv *priv = netdev_priv(netdev);
        int err;

        if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
                netdev->features ^= NETIF_F_LRO;
                if (netif_carrier_ok(netdev)) {
                        /* To make this process as simple as possible we
                         * teardown the device, set the new configuration,
                         * and then bring the device up again.
                         */
                        err = gve_close(netdev);
                        /* We have already tried to reset in close, just fail
                         * at this point.
                         */
                        if (err)
                                goto err;

                        err = gve_open(netdev);
                        if (err)
                                goto err;
                }
        }

        return 0;
err:
        /* Reverts the change on error. */
        netdev->features = orig_features;
        netif_err(priv, drv, netdev,
                  "Set features failed! !!! DISABLING ALL QUEUES !!!\n");
        return err;
}

static const struct net_device_ops gve_netdev_ops = {
        .ndo_start_xmit         =       gve_start_xmit,
        .ndo_open               =       gve_open,
        .ndo_stop               =       gve_close,
        .ndo_get_stats64        =       gve_get_stats,
        .ndo_tx_timeout         =       gve_tx_timeout,
        .ndo_set_features       =       gve_set_features,
};

static void gve_handle_status(struct gve_priv *priv, u32 status)
{
        if (GVE_DEVICE_STATUS_RESET_MASK & status) {
                dev_info(&priv->pdev->dev, "Device requested reset.\n");
                gve_set_do_reset(priv);
        }
        if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
                priv->stats_report_trigger_cnt++;
                gve_set_do_report_stats(priv);
        }
}

static void gve_handle_reset(struct gve_priv *priv)
{
        /* A service task will be scheduled at the end of probe to catch any
         * resets that need to happen, and we don't want to reset until
         * probe is done.
         */
        if (gve_get_probe_in_progress(priv))
                return;

        if (gve_get_do_reset(priv)) {
                rtnl_lock();
                gve_reset(priv, false);
                rtnl_unlock();
        }
}

void gve_handle_report_stats(struct gve_priv *priv)
{
        struct stats *stats = priv->stats_report->stats;
        int idx, stats_idx = 0;
        unsigned int start = 0;
        u64 tx_bytes;

        if (!gve_get_report_stats(priv))
                return;

        be64_add_cpu(&priv->stats_report->written_count, 1);
        /* tx stats */
        if (priv->tx) {
                for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
                        u32 last_completion = 0;
                        u32 tx_frames = 0;

                        /* DQO doesn't currently support these metrics. */
                        if (gve_is_gqi(priv)) {
                                last_completion = priv->tx[idx].done;
                                tx_frames = priv->tx[idx].req;
                        }

                        do {
                                start = u64_stats_fetch_begin(&priv->tx[idx].statss);
                                tx_bytes = priv->tx[idx].bytes_done;
                        } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(TX_WAKE_CNT),
                                .value = cpu_to_be64(priv->tx[idx].wake_queue),
                                .queue_id = cpu_to_be32(idx),
                        };
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(TX_STOP_CNT),
                                .value = cpu_to_be64(priv->tx[idx].stop_queue),
                                .queue_id = cpu_to_be32(idx),
                        };
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(TX_FRAMES_SENT),
                                .value = cpu_to_be64(tx_frames),
                                .queue_id = cpu_to_be32(idx),
                        };
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(TX_BYTES_SENT),
                                .value = cpu_to_be64(tx_bytes),
                                .queue_id = cpu_to_be32(idx),
                        };
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
                                .value = cpu_to_be64(last_completion),
                                .queue_id = cpu_to_be32(idx),
                        };
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(TX_TIMEOUT_CNT),
                                .value = cpu_to_be64(priv->tx[idx].queue_timeout),
                                .queue_id = cpu_to_be32(idx),
                        };
                }
        }
        /* rx stats */
        if (priv->rx) {
                for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
                                .value = cpu_to_be64(priv->rx[idx].desc.seqno),
                                .queue_id = cpu_to_be32(idx),
                        };
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
                                .value = cpu_to_be64(priv->rx[idx].fill_cnt),
                                .queue_id = cpu_to_be32(idx),
                        };
1327                 }
1328         }
1329 }
1330
1331 static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
1332 {
1333         if (!gve_get_napi_enabled(priv))
1334                 return;
1335
1336         if (link_status == netif_carrier_ok(priv->dev))
1337                 return;
1338
1339         if (link_status) {
1340                 netdev_info(priv->dev, "Device link is up.\n");
1341                 netif_carrier_on(priv->dev);
1342         } else {
1343                 netdev_info(priv->dev, "Device link is down.\n");
1344                 netif_carrier_off(priv->dev);
1345         }
1346 }
1347
1348 /* Handle NIC status register changes, reset requests and report stats */
1349 static void gve_service_task(struct work_struct *work)
1350 {
1351         struct gve_priv *priv = container_of(work, struct gve_priv,
1352                                              service_task);
1353         u32 status = ioread32be(&priv->reg_bar0->device_status);
1354
1355         gve_handle_status(priv, status);
1356
1357         gve_handle_reset(priv);
1358         gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
1359 }
1360
1361 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
1362 {
1363         int num_ntfy;
1364         int err;
1365
1366         /* Set up the adminq */
1367         err = gve_adminq_alloc(&priv->pdev->dev, priv);
1368         if (err) {
1369                 dev_err(&priv->pdev->dev,
1370                         "Failed to alloc admin queue: err=%d\n", err);
1371                 return err;
1372         }
1373
1374         if (skip_describe_device)
1375                 goto setup_device;
1376
1377         priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
1378         /* Get the initial information we need from the device */
1379         err = gve_adminq_describe_device(priv);
1380         if (err) {
1381                 dev_err(&priv->pdev->dev,
1382                         "Could not get device information: err=%d\n", err);
1383                 goto err;
1384         }
1385         priv->dev->mtu = priv->dev->max_mtu;
1386         num_ntfy = pci_msix_vec_count(priv->pdev);
1387         if (num_ntfy <= 0) {
1388                 dev_err(&priv->pdev->dev,
1389                         "could not count MSI-x vectors: err=%d\n", num_ntfy);
1390                 err = num_ntfy;
1391                 goto err;
1392         } else if (num_ntfy < GVE_MIN_MSIX) {
1393                 dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
1394                         GVE_MIN_MSIX, num_ntfy);
1395                 err = -EINVAL;
1396                 goto err;
1397         }
1398
1399         priv->num_registered_pages = 0;
1400         priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
1401         /* gvnic has one Notification Block per MSI-x vector, except for the
1402          * management vector
1403          */
1404         priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
1405         priv->mgmt_msix_idx = priv->num_ntfy_blks;
1406
1407         priv->tx_cfg.max_queues =
1408                 min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
1409         priv->rx_cfg.max_queues =
1410                 min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);
1411
1412         priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
1413         priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
1414         if (priv->default_num_queues > 0) {
1415                 priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
1416                                                 priv->tx_cfg.num_queues);
1417                 priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
1418                                                 priv->rx_cfg.num_queues);
1419         }
1420
1421         dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
1422                  priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
1423         dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
1424                  priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
1425
1426         if (!gve_is_gqi(priv)) {
1427                 priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO;
1428                 priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
1429         }
1430
1431 setup_device:
1432         err = gve_setup_device_resources(priv);
1433         if (!err)
1434                 return 0;
1435 err:
1436         gve_adminq_free(&priv->pdev->dev, priv);
1437         return err;
1438 }
1439
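/* Release the shared device resources and free the admin queue. */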
1440 static void gve_teardown_priv_resources(struct gve_priv *priv)
1441 {
1442         gve_teardown_device_resources(priv);
1443         gve_adminq_free(&priv->pdev->dev, priv);
1444 }
1445
1446 static void gve_trigger_reset(struct gve_priv *priv)
1447 {
1448         /* Reset the device by releasing the admin queue (AQ) */
1449         gve_adminq_release(priv);
1450 }
1451
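/* Reset the device, close the netdev if it was up, and release all
 * per-device resources.
 */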
1452 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
1453 {
1454         gve_trigger_reset(priv);
1455         /* With the reset having already happened, close cannot fail */
1456         if (was_up)
1457                 gve_close(priv->dev);
1458         gve_teardown_priv_resources(priv);
1459 }
1460
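/* Rebuild device state after a reset, reopening the netdev if it was up
 * beforehand. On failure all queues are turned down.
 */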
1461 static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
1462 {
1463         int err;
1464
1465         err = gve_init_priv(priv, true);
1466         if (err)
1467                 goto err;
1468         if (was_up) {
1469                 err = gve_open(priv->dev);
1470                 if (err)
1471                         goto err;
1472         }
1473         return 0;
1474 err:
1475         dev_err(&priv->pdev->dev, "Reset failed! Disabling all queues\n");
1476         gve_turndown(priv);
1477         return err;
1478 }
1479
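/* Full reset: either attempt a clean close first (attempt_teardown) or turn
 * down and reset immediately, then tear down and rebuild device state.
 */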
1480 int gve_reset(struct gve_priv *priv, bool attempt_teardown)
1481 {
1482         bool was_up = netif_carrier_ok(priv->dev);
1483         int err;
1484
1485         dev_info(&priv->pdev->dev, "Performing reset\n");
1486         gve_clear_do_reset(priv);
1487         gve_set_reset_in_progress(priv);
1488         /* If we aren't attempting to tear down normally, just turn down and
1489          * reset right away.
1490          */
1491         if (!attempt_teardown) {
1492                 gve_turndown(priv);
1493                 gve_reset_and_teardown(priv, was_up);
1494         } else {
1495                 /* Otherwise attempt to close normally */
1496                 if (was_up) {
1497                         err = gve_close(priv->dev);
1498                         /* If that fails, reset as we did above */
1499                         if (err)
1500                                 gve_reset_and_teardown(priv, was_up);
1501                 }
1502                 /* Clean up any remaining resources */
1503                 gve_teardown_priv_resources(priv);
1504         }
1505
1506         /* Set it all back up */
1507         err = gve_reset_recovery(priv, was_up);
1508         gve_clear_reset_in_progress(priv);
1509         priv->reset_cnt++;
1510         priv->interface_up_cnt = 0;
1511         priv->interface_down_cnt = 0;
1512         priv->stats_report_trigger_cnt = 0;
1513         return err;
1514 }
1515
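/* Report the driver version to the device by writing the "GVE-" prefix and
 * the version string, followed by a newline, one byte at a time to the
 * driver_version register.
 */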
1516 static void gve_write_version(u8 __iomem *driver_version_register)
1517 {
1518         const char *c = gve_version_prefix;
1519
1520         while (*c) {
1521                 writeb(*c, driver_version_register);
1522                 c++;
1523         }
1524
1525         c = gve_version_str;
1526         while (*c) {
1527                 writeb(*c, driver_version_register);
1528                 c++;
1529         }
1530         writeb('\n', driver_version_register);
1531 }
1532
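/* PCI probe: enable the device, map the register and doorbell BARs, allocate
 * the netdev and private state, create the service workqueue, initialize the
 * device, and register the netdev.
 */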
1533 static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1534 {
1535         int max_tx_queues, max_rx_queues;
1536         struct net_device *dev;
1537         __be32 __iomem *db_bar;
1538         struct gve_registers __iomem *reg_bar;
1539         struct gve_priv *priv;
1540         int err;
1541
1542         err = pci_enable_device(pdev);
1543         if (err)
1544                 return err;
1545
1546         err = pci_request_regions(pdev, "gvnic-cfg");
1547         if (err)
1548                 goto abort_with_enabled;
1549
1550         pci_set_master(pdev);
1551
1552         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1553         if (err) {
1554                 dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
1555                 goto abort_with_pci_region;
1556         }
1557
1558         reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
1559         if (!reg_bar) {
1560                 dev_err(&pdev->dev, "Failed to map pci bar!\n");
1561                 err = -ENOMEM;
1562                 goto abort_with_pci_region;
1563         }
1564
1565         db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
1566         if (!db_bar) {
1567                 dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
1568                 err = -ENOMEM;
1569                 goto abort_with_reg_bar;
1570         }
1571
1572         gve_write_version(&reg_bar->driver_version);
1573         /* Get max queues to alloc etherdev */
1574         max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
1575         max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
1576         /* Alloc and setup the netdev and priv */
1577         dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
1578         if (!dev) {
1579                 dev_err(&pdev->dev, "could not allocate netdev\n");
1580                 err = -ENOMEM;
1581                 goto abort_with_db_bar;
1582         }
1583         SET_NETDEV_DEV(dev, &pdev->dev);
1584         pci_set_drvdata(pdev, dev);
1585         dev->ethtool_ops = &gve_ethtool_ops;
1586         dev->netdev_ops = &gve_netdev_ops;
1587
1588         /* Set default and supported features.
1589          *
1590          * Features might be set in other locations as well (such as
1591          * `gve_adminq_describe_device`).
1592          */
1593         dev->hw_features = NETIF_F_HIGHDMA;
1594         dev->hw_features |= NETIF_F_SG;
1595         dev->hw_features |= NETIF_F_HW_CSUM;
1596         dev->hw_features |= NETIF_F_TSO;
1597         dev->hw_features |= NETIF_F_TSO6;
1598         dev->hw_features |= NETIF_F_TSO_ECN;
1599         dev->hw_features |= NETIF_F_RXCSUM;
1600         dev->hw_features |= NETIF_F_RXHASH;
1601         dev->features = dev->hw_features;
1602         dev->watchdog_timeo = 5 * HZ;
1603         dev->min_mtu = ETH_MIN_MTU;
1604         netif_carrier_off(dev);
1605
1606         priv = netdev_priv(dev);
1607         priv->dev = dev;
1608         priv->pdev = pdev;
1609         priv->msg_enable = DEFAULT_MSG_LEVEL;
1610         priv->reg_bar0 = reg_bar;
1611         priv->db_bar2 = db_bar;
1612         priv->service_task_flags = 0x0;
1613         priv->state_flags = 0x0;
1614         priv->ethtool_flags = 0x0;
1615
1616         gve_set_probe_in_progress(priv);
1617         priv->gve_wq = alloc_ordered_workqueue("gve", 0);
1618         if (!priv->gve_wq) {
1619                 dev_err(&pdev->dev, "Could not allocate workqueue\n");
1620                 err = -ENOMEM;
1621                 goto abort_with_netdev;
1622         }
1623         INIT_WORK(&priv->service_task, gve_service_task);
1624         INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
1625         priv->tx_cfg.max_queues = max_tx_queues;
1626         priv->rx_cfg.max_queues = max_rx_queues;
1627
1628         err = gve_init_priv(priv, false);
1629         if (err)
1630                 goto abort_with_wq;
1631
1632         err = register_netdev(dev);
1633         if (err)
1634                 goto abort_with_gve_init;
1635
1636         dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
1637         dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
1638         gve_clear_probe_in_progress(priv);
1639         queue_work(priv->gve_wq, &priv->service_task);
1640         return 0;
1641
1642 abort_with_gve_init:
1643         gve_teardown_priv_resources(priv);
1644
1645 abort_with_wq:
1646         destroy_workqueue(priv->gve_wq);
1647
1648 abort_with_netdev:
1649         free_netdev(dev);
1650
1651 abort_with_db_bar:
1652         pci_iounmap(pdev, db_bar);
1653
1654 abort_with_reg_bar:
1655         pci_iounmap(pdev, reg_bar);
1656
1657 abort_with_pci_region:
1658         pci_release_regions(pdev);
1659
1660 abort_with_enabled:
1661         pci_disable_device(pdev);
1662         return err;
1663 }
1664
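/* PCI remove: unregister the netdev and undo everything done in gve_probe(). */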
1665 static void gve_remove(struct pci_dev *pdev)
1666 {
1667         struct net_device *netdev = pci_get_drvdata(pdev);
1668         struct gve_priv *priv = netdev_priv(netdev);
1669         __be32 __iomem *db_bar = priv->db_bar2;
1670         void __iomem *reg_bar = priv->reg_bar0;
1671
1672         unregister_netdev(netdev);
1673         gve_teardown_priv_resources(priv);
1674         destroy_workqueue(priv->gve_wq);
1675         free_netdev(netdev);
1676         pci_iounmap(pdev, db_bar);
1677         pci_iounmap(pdev, reg_bar);
1678         pci_release_regions(pdev);
1679         pci_disable_device(pdev);
1680 }
1681
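/* Shutdown: close the netdev if it was up (resetting if the close fails) and
 * tear down the per-device resources.
 */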
1682 static void gve_shutdown(struct pci_dev *pdev)
1683 {
1684         struct net_device *netdev = pci_get_drvdata(pdev);
1685         struct gve_priv *priv = netdev_priv(netdev);
1686         bool was_up = netif_carrier_ok(priv->dev);
1687
1688         rtnl_lock();
1689         if (was_up && gve_close(priv->dev)) {
1690                 /* If the dev was up, attempt to close; if close fails, reset */
1691                 gve_reset_and_teardown(priv, was_up);
1692         } else {
1693                 /* If the dev wasn't up or close worked, finish tearing down */
1694                 gve_teardown_priv_resources(priv);
1695         }
1696         rtnl_unlock();
1697 }
1698
1699 #ifdef CONFIG_PM
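/* Legacy PM suspend: close the netdev if it was up (resetting if the close
 * fails), tear down resources, and remember whether the device was up so
 * resume can restore it.
 */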
1700 static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
1701 {
1702         struct net_device *netdev = pci_get_drvdata(pdev);
1703         struct gve_priv *priv = netdev_priv(netdev);
1704         bool was_up = netif_carrier_ok(priv->dev);
1705
1706         priv->suspend_cnt++;
1707         rtnl_lock();
1708         if (was_up && gve_close(priv->dev)) {
1709                 /* If the dev was up, attempt to close; if close fails, reset */
1710                 gve_reset_and_teardown(priv, was_up);
1711         } else {
1712                 /* If the dev wasn't up or close worked, finish tearing down */
1713                 gve_teardown_priv_resources(priv);
1714         }
1715         priv->up_before_suspend = was_up;
1716         rtnl_unlock();
1717         return 0;
1718 }
1719
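/* Legacy PM resume: rebuild device state and reopen the netdev if it was up
 * before suspend.
 */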
1720 static int gve_resume(struct pci_dev *pdev)
1721 {
1722         struct net_device *netdev = pci_get_drvdata(pdev);
1723         struct gve_priv *priv = netdev_priv(netdev);
1724         int err;
1725
1726         priv->resume_cnt++;
1727         rtnl_lock();
1728         err = gve_reset_recovery(priv, priv->up_before_suspend);
1729         rtnl_unlock();
1730         return err;
1731 }
1732 #endif /* CONFIG_PM */
1733
1734 static const struct pci_device_id gve_id_table[] = {
1735         { PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
1736         { }
1737 };
1738
1739 static struct pci_driver gvnic_driver = {
1740         .name           = "gvnic",
1741         .id_table       = gve_id_table,
1742         .probe          = gve_probe,
1743         .remove         = gve_remove,
1744         .shutdown       = gve_shutdown,
1745 #ifdef CONFIG_PM
1746         .suspend        = gve_suspend,
1747         .resume         = gve_resume,
1748 #endif
1749 };
1750
1751 module_pci_driver(gvnic_driver);
1752
1753 MODULE_DEVICE_TABLE(pci, gve_id_table);
1754 MODULE_AUTHOR("Google, Inc.");
1755 MODULE_DESCRIPTION("gVNIC Driver");
1756 MODULE_LICENSE("Dual MIT/GPL");
1757 MODULE_VERSION(GVE_VERSION);