1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
4 * Copyright (C) 2015-2021 Google, Inc.
7 #include <linux/cpumask.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/sched.h>
13 #include <linux/timer.h>
14 #include <linux/workqueue.h>
15 #include <net/sch_generic.h>
18 #include "gve_adminq.h"
19 #include "gve_register.h"
21 #define GVE_DEFAULT_RX_COPYBREAK (256)
23 #define DEFAULT_MSG_LEVEL (NETIF_MSG_DRV | NETIF_MSG_LINK)
24 #define GVE_VERSION "1.0.0"
25 #define GVE_VERSION_PREFIX "GVE-"
27 /* Minimum amount of time between queue kicks in msec (10 seconds) */
28 #define MIN_TX_TIMEOUT_GAP (1000 * 10)
30 const char gve_version_str[] = GVE_VERSION;
31 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
33 static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
35 struct gve_priv *priv = netdev_priv(dev);
38 return gve_tx(skb, dev);
40 return gve_tx_dqo(skb, dev);
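/* gve_get_stats() below aggregates per-ring counters into the netdev
 * rtnl_link_stats64. Each ring keeps its counters under a u64_stats_sync
 * seqcount, so a read is retried if a writer updated the ring concurrently.
 * A sketch of the read-side pattern used in both loops:
 *
 *	do {
 *		start = u64_stats_fetch_begin(&priv->rx[ring].statss);
 *		packets = priv->rx[ring].rpackets;
 *		bytes = priv->rx[ring].rbytes;
 *	} while (u64_stats_fetch_retry(&priv->rx[ring].statss, start));
 */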
43 static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
45 struct gve_priv *priv = netdev_priv(dev);
51 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
54 u64_stats_fetch_begin(&priv->rx[ring].statss);
55 packets = priv->rx[ring].rpackets;
56 bytes = priv->rx[ring].rbytes;
57 } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
59 s->rx_packets += packets;
64 for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
67 u64_stats_fetch_begin(&priv->tx[ring].statss);
68 packets = priv->tx[ring].pkt_done;
69 bytes = priv->tx[ring].bytes_done;
70 } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
72 s->tx_packets += packets;
78 static int gve_alloc_counter_array(struct gve_priv *priv)
81 dma_alloc_coherent(&priv->pdev->dev,
82 priv->num_event_counters *
83 sizeof(*priv->counter_array),
84 &priv->counter_array_bus, GFP_KERNEL);
85 if (!priv->counter_array)
91 static void gve_free_counter_array(struct gve_priv *priv)
93 if (!priv->counter_array)
96 dma_free_coherent(&priv->pdev->dev,
97 priv->num_event_counters *
98 sizeof(*priv->counter_array),
99 priv->counter_array, priv->counter_array_bus);
100 priv->counter_array = NULL;
103 /* NIC requests to report stats */
104 static void gve_stats_report_task(struct work_struct *work)
106 struct gve_priv *priv = container_of(work, struct gve_priv,
108 if (gve_get_do_report_stats(priv)) {
109 gve_handle_report_stats(priv);
110 gve_clear_do_report_stats(priv);
114 static void gve_stats_report_schedule(struct gve_priv *priv)
116 if (!gve_get_probe_in_progress(priv) &&
117 !gve_get_reset_in_progress(priv)) {
118 gve_set_do_report_stats(priv);
119 queue_work(priv->gve_wq, &priv->stats_report_task);
123 static void gve_stats_report_timer(struct timer_list *t)
125 struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
127 mod_timer(&priv->stats_report_timer,
128 round_jiffies(jiffies +
129 msecs_to_jiffies(priv->stats_report_timer_period)));
130 gve_stats_report_schedule(priv);
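/* The stats report lives in DMA-coherent memory shared with the device. Its
 * size is struct_size(stats_report, stats, tx_stats_num + rx_stats_num),
 * with the per-queue record counts taken from the GVE_*_STATS_REPORT_NUM and
 * NIC_*_STATS_REPORT_NUM constants, and the timer set up here periodically
 * re-queues gve_stats_report_task via gve_stats_report_timer() above.
 */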
133 static int gve_alloc_stats_report(struct gve_priv *priv)
135 int tx_stats_num, rx_stats_num;
137 tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
138 priv->tx_cfg.num_queues;
139 rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
140 priv->rx_cfg.num_queues;
141 priv->stats_report_len = struct_size(priv->stats_report, stats,
142 tx_stats_num + rx_stats_num);
144 dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
145 &priv->stats_report_bus, GFP_KERNEL);
146 if (!priv->stats_report)
148 /* Set up timer for the report-stats task */
149 timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
150 priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
154 static void gve_free_stats_report(struct gve_priv *priv)
156 if (!priv->stats_report)
159 del_timer_sync(&priv->stats_report_timer);
160 dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
161 priv->stats_report, priv->stats_report_bus);
162 priv->stats_report = NULL;
165 static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
167 struct gve_priv *priv = arg;
169 queue_work(priv->gve_wq, &priv->service_task);
173 static irqreturn_t gve_intr(int irq, void *arg)
175 struct gve_notify_block *block = arg;
176 struct gve_priv *priv = block->priv;
178 iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
179 napi_schedule_irqoff(&block->napi);
183 static irqreturn_t gve_intr_dqo(int irq, void *arg)
185 struct gve_notify_block *block = arg;
187 /* Interrupts are automatically masked */
188 napi_schedule_irqoff(&block->napi);
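/* NAPI poll for the GQI queue format. gve_intr() above masks the vector by
 * writing GVE_IRQ_MASK to the block's doorbell (DQO vectors mask themselves
 * in hardware), so this poll loop is responsible for re-arming the IRQ with
 * GVE_IRQ_ACK | GVE_IRQ_EVENT once napi_complete_done() succeeds, and for
 * rescheduling itself if more work arrived around the unmask.
 */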
192 static int gve_napi_poll(struct napi_struct *napi, int budget)
194 struct gve_notify_block *block;
195 __be32 __iomem *irq_doorbell;
196 bool reschedule = false;
197 struct gve_priv *priv;
200 block = container_of(napi, struct gve_notify_block, napi);
204 reschedule |= gve_tx_poll(block, budget);
206 work_done = gve_rx_poll(block, budget);
207 reschedule |= work_done == budget;
213 /* Complete processing - don't unmask irq if busy polling is enabled */
214 if (likely(napi_complete_done(napi, work_done))) {
215 irq_doorbell = gve_irq_doorbell(priv, block);
216 iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
218 /* Ensure IRQ ACK is visible before we check pending work.
219 * If the queue had issued updates, they would be truly visible here.
224 reschedule |= gve_tx_clean_pending(priv, block->tx);
226 reschedule |= gve_rx_work_pending(block->rx);
228 if (reschedule && napi_reschedule(napi))
229 iowrite32be(GVE_IRQ_MASK, irq_doorbell);
234 static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
236 struct gve_notify_block *block =
237 container_of(napi, struct gve_notify_block, napi);
238 struct gve_priv *priv = block->priv;
239 bool reschedule = false;
242 /* Clear PCI MSI-X Pending Bit Array (PBA)
244 * This bit is set if an interrupt event occurs while the vector is
245 * masked. If this bit is set and we reenable the interrupt, it will
246 * fire again. Since we're just about to poll the queue state, we don't
247 * need it to fire again.
249 * Under high softirq load, it's possible that the interrupt condition
250 is triggered twice before we get the chance to process it.
252 gve_write_irq_doorbell_dqo(priv, block,
253 GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);
256 reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
259 work_done = gve_rx_poll_dqo(block, budget);
260 reschedule |= work_done == budget;
266 if (likely(napi_complete_done(napi, work_done))) {
267 /* Enable interrupts again.
269 * We don't need to repoll afterwards because HW supports the
270 * PCI MSI-X PBA feature.
272 * Another interrupt would be triggered if a new event came in
273 * since the last one.
275 gve_write_irq_doorbell_dqo(priv, block,
276 GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
282 static int gve_alloc_notify_blocks(struct gve_priv *priv)
284 int num_vecs_requested = priv->num_ntfy_blks + 1;
285 char *name = priv->dev->name;
286 unsigned int active_cpus;
291 priv->msix_vectors = kvcalloc(num_vecs_requested,
292 sizeof(*priv->msix_vectors), GFP_KERNEL);
293 if (!priv->msix_vectors)
295 for (i = 0; i < num_vecs_requested; i++)
296 priv->msix_vectors[i].entry = i;
297 vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
298 GVE_MIN_MSIX, num_vecs_requested);
299 if (vecs_enabled < 0) {
300 dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
301 GVE_MIN_MSIX, vecs_enabled);
303 goto abort_with_msix_vectors;
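/* Fewer vectors than requested were granted: keep an even number of
 * notification vectors (the last granted vector stays reserved for
 * management) and shrink the TX/RX queue maxima so that every remaining
 * queue still has a notification vector of its own.
 */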
305 if (vecs_enabled != num_vecs_requested) {
306 int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
307 int vecs_per_type = new_num_ntfy_blks / 2;
308 int vecs_left = new_num_ntfy_blks % 2;
310 priv->num_ntfy_blks = new_num_ntfy_blks;
311 priv->mgmt_msix_idx = priv->num_ntfy_blks;
312 priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
314 priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
315 vecs_per_type + vecs_left);
316 dev_err(&priv->pdev->dev,
317 "Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
318 vecs_enabled, priv->tx_cfg.max_queues,
319 priv->rx_cfg.max_queues);
320 if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
321 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
322 if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
323 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
325 /* Half the notification blocks go to TX and half to RX */
326 active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
328 /* Setup Management Vector - the last vector */
329 snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
331 err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
332 gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
334 dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
335 goto abort_with_msix_enabled;
337 priv->irq_db_indices =
338 dma_alloc_coherent(&priv->pdev->dev,
339 priv->num_ntfy_blks *
340 sizeof(*priv->irq_db_indices),
341 &priv->irq_db_indices_bus, GFP_KERNEL);
342 if (!priv->irq_db_indices) {
344 goto abort_with_mgmt_vector;
347 priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks *
348 sizeof(*priv->ntfy_blocks), GFP_KERNEL);
349 if (!priv->ntfy_blocks) {
351 goto abort_with_irq_db_indices;
354 /* Setup the other blocks - the first n-1 vectors */
355 for (i = 0; i < priv->num_ntfy_blks; i++) {
356 struct gve_notify_block *block = &priv->ntfy_blocks[i];
359 snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
362 err = request_irq(priv->msix_vectors[msix_idx].vector,
363 gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
364 0, block->name, block);
366 dev_err(&priv->pdev->dev,
367 "Failed to receive msix vector %d\n", i);
368 goto abort_with_some_ntfy_blocks;
370 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
371 get_cpu_mask(i % active_cpus));
372 block->irq_db_index = &priv->irq_db_indices[i].index;
375 abort_with_some_ntfy_blocks:
376 for (j = 0; j < i; j++) {
377 struct gve_notify_block *block = &priv->ntfy_blocks[j];
380 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
382 free_irq(priv->msix_vectors[msix_idx].vector, block);
384 kvfree(priv->ntfy_blocks);
385 priv->ntfy_blocks = NULL;
386 abort_with_irq_db_indices:
387 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
388 sizeof(*priv->irq_db_indices),
389 priv->irq_db_indices, priv->irq_db_indices_bus);
390 priv->irq_db_indices = NULL;
391 abort_with_mgmt_vector:
392 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
393 abort_with_msix_enabled:
394 pci_disable_msix(priv->pdev);
395 abort_with_msix_vectors:
396 kvfree(priv->msix_vectors);
397 priv->msix_vectors = NULL;
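/* Teardown mirrors the allocation order above: clear each block's affinity
 * hint and free its IRQ, then the management IRQ, the notify block array,
 * the doorbell index array, MSI-X itself, and finally the vector table.
 */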
401 static void gve_free_notify_blocks(struct gve_priv *priv)
405 if (!priv->msix_vectors)
409 for (i = 0; i < priv->num_ntfy_blks; i++) {
410 struct gve_notify_block *block = &priv->ntfy_blocks[i];
413 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
415 free_irq(priv->msix_vectors[msix_idx].vector, block);
417 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
418 kvfree(priv->ntfy_blocks);
419 priv->ntfy_blocks = NULL;
420 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
421 sizeof(*priv->irq_db_indices),
422 priv->irq_db_indices, priv->irq_db_indices_bus);
423 priv->irq_db_indices = NULL;
424 pci_disable_msix(priv->pdev);
425 kvfree(priv->msix_vectors);
426 priv->msix_vectors = NULL;
429 static int gve_setup_device_resources(struct gve_priv *priv)
433 err = gve_alloc_counter_array(priv);
436 err = gve_alloc_notify_blocks(priv);
438 goto abort_with_counter;
439 err = gve_alloc_stats_report(priv);
441 goto abort_with_ntfy_blocks;
442 err = gve_adminq_configure_device_resources(priv,
443 priv->counter_array_bus,
444 priv->num_event_counters,
445 priv->irq_db_indices_bus,
446 priv->num_ntfy_blks);
448 dev_err(&priv->pdev->dev,
449 "could not setup device_resources: err=%d\n", err);
451 goto abort_with_stats_report;
454 if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
455 priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
457 if (!priv->ptype_lut_dqo) {
459 goto abort_with_stats_report;
461 err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
463 dev_err(&priv->pdev->dev,
464 "Failed to get ptype map: err=%d\n", err);
465 goto abort_with_ptype_lut;
469 err = gve_adminq_report_stats(priv, priv->stats_report_len,
470 priv->stats_report_bus,
471 GVE_STATS_REPORT_TIMER_PERIOD);
473 dev_err(&priv->pdev->dev,
474 "Failed to report stats: err=%d\n", err);
475 gve_set_device_resources_ok(priv);
478 abort_with_ptype_lut:
479 kvfree(priv->ptype_lut_dqo);
480 priv->ptype_lut_dqo = NULL;
481 abort_with_stats_report:
482 gve_free_stats_report(priv);
483 abort_with_ntfy_blocks:
484 gve_free_notify_blocks(priv);
486 gve_free_counter_array(priv);
491 static void gve_trigger_reset(struct gve_priv *priv);
493 static void gve_teardown_device_resources(struct gve_priv *priv)
497 /* Tell device its resources are being freed */
498 if (gve_get_device_resources_ok(priv)) {
499 /* detach the stats report */
500 err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
502 dev_err(&priv->pdev->dev,
503 "Failed to detach stats report: err=%d\n", err);
504 gve_trigger_reset(priv);
506 err = gve_adminq_deconfigure_device_resources(priv);
508 dev_err(&priv->pdev->dev,
509 "Could not deconfigure device resources: err=%d\n",
511 gve_trigger_reset(priv);
515 kvfree(priv->ptype_lut_dqo);
516 priv->ptype_lut_dqo = NULL;
518 gve_free_counter_array(priv);
519 gve_free_notify_blocks(priv);
520 gve_free_stats_report(priv);
521 gve_clear_device_resources_ok(priv);
524 static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
525 int (*gve_poll)(struct napi_struct *, int))
527 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
529 netif_napi_add(priv->dev, &block->napi, gve_poll,
533 static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
535 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
537 netif_napi_del(&block->napi);
540 static int gve_register_qpls(struct gve_priv *priv)
542 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
546 for (i = 0; i < num_qpls; i++) {
547 err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
549 netif_err(priv, drv, priv->dev,
550 "failed to register queue page list %d\n",
552 /* This failure will trigger a reset - no need to clean
561 static int gve_unregister_qpls(struct gve_priv *priv)
563 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
567 for (i = 0; i < num_qpls; i++) {
568 err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
569 /* This failure will trigger a reset - no need to clean up */
571 netif_err(priv, drv, priv->dev,
572 "Failed to unregister queue page list %d\n",
580 static int gve_create_rings(struct gve_priv *priv)
585 err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
587 netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
588 priv->tx_cfg.num_queues);
589 /* This failure will trigger a reset - no need to clean
594 netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
595 priv->tx_cfg.num_queues);
597 err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
599 netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
600 priv->rx_cfg.num_queues);
601 /* This failure will trigger a reset - no need to clean
606 netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
607 priv->rx_cfg.num_queues);
609 if (gve_is_gqi(priv)) {
610 /* Rx data ring has been prefilled with packet buffers at queue
613 * Write the doorbell to provide descriptor slots and packet
614 * buffers to the NIC.
616 for (i = 0; i < priv->rx_cfg.num_queues; i++)
617 gve_rx_write_doorbell(priv, &priv->rx[i]);
619 for (i = 0; i < priv->rx_cfg.num_queues; i++) {
620 /* Post buffers and ring doorbell. */
621 gve_rx_post_buffers_dqo(&priv->rx[i]);
628 static void add_napi_init_sync_stats(struct gve_priv *priv,
629 int (*napi_poll)(struct napi_struct *napi,
634 /* Add tx napi & init sync stats*/
635 for (i = 0; i < priv->tx_cfg.num_queues; i++) {
636 int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
638 u64_stats_init(&priv->tx[i].statss);
639 priv->tx[i].ntfy_id = ntfy_idx;
640 gve_add_napi(priv, ntfy_idx, napi_poll);
642 /* Add rx napi & init sync stats*/
643 for (i = 0; i < priv->rx_cfg.num_queues; i++) {
644 int ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
646 u64_stats_init(&priv->rx[i].statss);
647 priv->rx[i].ntfy_id = ntfy_idx;
648 gve_add_napi(priv, ntfy_idx, napi_poll);
652 static void gve_tx_free_rings(struct gve_priv *priv)
654 if (gve_is_gqi(priv)) {
655 gve_tx_free_rings_gqi(priv);
657 gve_tx_free_rings_dqo(priv);
661 static int gve_alloc_rings(struct gve_priv *priv)
666 priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx),
671 if (gve_is_gqi(priv))
672 err = gve_tx_alloc_rings(priv);
674 err = gve_tx_alloc_rings_dqo(priv);
679 priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx),
686 if (gve_is_gqi(priv))
687 err = gve_rx_alloc_rings(priv);
689 err = gve_rx_alloc_rings_dqo(priv);
693 if (gve_is_gqi(priv))
694 add_napi_init_sync_stats(priv, gve_napi_poll);
696 add_napi_init_sync_stats(priv, gve_napi_poll_dqo);
704 gve_tx_free_rings(priv);
711 static int gve_destroy_rings(struct gve_priv *priv)
715 err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
717 netif_err(priv, drv, priv->dev,
718 "failed to destroy tx queues\n");
719 /* This failure will trigger a reset - no need to clean up */
722 netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
723 err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
725 netif_err(priv, drv, priv->dev,
726 "failed to destroy rx queues\n");
727 /* This failure will trigger a reset - no need to clean up */
730 netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
734 static void gve_rx_free_rings(struct gve_priv *priv)
736 if (gve_is_gqi(priv))
737 gve_rx_free_rings_gqi(priv);
739 gve_rx_free_rings_dqo(priv);
742 static void gve_free_rings(struct gve_priv *priv)
748 for (i = 0; i < priv->tx_cfg.num_queues; i++) {
749 ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
750 gve_remove_napi(priv, ntfy_idx);
752 gve_tx_free_rings(priv);
757 for (i = 0; i < priv->rx_cfg.num_queues; i++) {
758 ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
759 gve_remove_napi(priv, ntfy_idx);
761 gve_rx_free_rings(priv);
767 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
768 struct page **page, dma_addr_t *dma,
769 enum dma_data_direction dir)
771 *page = alloc_page(GFP_KERNEL);
773 priv->page_alloc_fail++;
776 *dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
777 if (dma_mapping_error(dev, *dma)) {
778 priv->dma_mapping_error++;
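/* A queue page list (QPL) is an array of pages plus their bus addresses.
 * gve_register_qpls() later hands each list to the device over the admin
 * queue so descriptors can refer to buffers by their offset within the
 * registered list; num_registered_pages tracks the global budget against
 * max_registered_pages.
 */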
785 static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
788 struct gve_queue_page_list *qpl = &priv->qpls[id];
792 if (pages + priv->num_registered_pages > priv->max_registered_pages) {
793 netif_err(priv, drv, priv->dev,
794 "Reached max number of registered pages %llu > %llu\n",
795 pages + priv->num_registered_pages,
796 priv->max_registered_pages);
801 qpl->num_entries = 0;
802 qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
803 /* caller handles clean up */
806 qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
807 /* caller handles clean up */
808 if (!qpl->page_buses)
811 for (i = 0; i < pages; i++) {
812 err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
814 gve_qpl_dma_dir(priv, id));
815 /* caller handles clean up */
820 priv->num_registered_pages += pages;
825 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
826 enum dma_data_direction dir)
828 if (!dma_mapping_error(dev, dma))
829 dma_unmap_page(dev, dma, PAGE_SIZE, dir);
834 static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
836 struct gve_queue_page_list *qpl = &priv->qpls[id];
841 if (!qpl->page_buses)
844 for (i = 0; i < qpl->num_entries; i++)
845 gve_free_page(&priv->pdev->dev, qpl->pages[i],
846 qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
848 kvfree(qpl->page_buses);
851 priv->num_registered_pages -= qpl->num_entries;
854 static int gve_alloc_qpls(struct gve_priv *priv)
856 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
860 /* Raw addressing means no QPLs */
861 if (priv->queue_format == GVE_GQI_RDA_FORMAT)
864 priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
868 for (i = 0; i < gve_num_tx_qpls(priv); i++) {
869 err = gve_alloc_queue_page_list(priv, i,
870 priv->tx_pages_per_qpl);
874 for (; i < num_qpls; i++) {
875 err = gve_alloc_queue_page_list(priv, i,
876 priv->rx_data_slot_cnt);
881 priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
882 sizeof(unsigned long) * BITS_PER_BYTE;
883 priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls),
884 sizeof(unsigned long), GFP_KERNEL);
885 if (!priv->qpl_cfg.qpl_id_map) {
893 for (j = 0; j <= i; j++)
894 gve_free_queue_page_list(priv, j);
899 static void gve_free_qpls(struct gve_priv *priv)
901 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
904 /* Raw addressing means no QPLs */
905 if (priv->queue_format == GVE_GQI_RDA_FORMAT)
908 kvfree(priv->qpl_cfg.qpl_id_map);
910 for (i = 0; i < num_qpls; i++)
911 gve_free_queue_page_list(priv, i);
916 /* Use this to schedule a reset when the device is capable of continuing
917 * to handle other requests in its current state. If it is not, do a reset
920 void gve_schedule_reset(struct gve_priv *priv)
922 gve_set_do_reset(priv);
923 queue_work(priv->gve_wq, &priv->service_task);
926 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
927 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
928 static void gve_turndown(struct gve_priv *priv);
929 static void gve_turnup(struct gve_priv *priv);
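/* gve_open() brings the interface up in a fixed order: allocate QPLs and
 * rings, set the real TX/RX queue counts, register the QPLs and create the
 * rings on the device, then arm the stats-report timer and kick the service
 * task. If anything fails after the device has been touched, the error path
 * falls back to a reset via gve_reset_and_teardown()/gve_reset_recovery().
 */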
931 static int gve_open(struct net_device *dev)
933 struct gve_priv *priv = netdev_priv(dev);
936 err = gve_alloc_qpls(priv);
940 err = gve_alloc_rings(priv);
944 err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
947 err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
951 err = gve_register_qpls(priv);
955 if (!gve_is_gqi(priv)) {
956 /* Hard code this for now. This may be tuned in the future for
959 priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
961 err = gve_create_rings(priv);
965 gve_set_device_rings_ok(priv);
967 if (gve_get_report_stats(priv))
968 mod_timer(&priv->stats_report_timer,
969 round_jiffies(jiffies +
970 msecs_to_jiffies(priv->stats_report_timer_period)));
973 queue_work(priv->gve_wq, &priv->service_task);
974 priv->interface_up_cnt++;
978 gve_free_rings(priv);
984 /* This must have been called from a reset due to the rtnl lock
985 * so just return at this point.
987 if (gve_get_reset_in_progress(priv))
989 /* Otherwise reset before returning */
990 gve_reset_and_teardown(priv, true);
991 /* if this fails there is nothing we can do so just ignore the return */
992 gve_reset_recovery(priv, false);
993 /* return the original error */
997 static int gve_close(struct net_device *dev)
999 struct gve_priv *priv = netdev_priv(dev);
1002 netif_carrier_off(dev);
1003 if (gve_get_device_rings_ok(priv)) {
1005 err = gve_destroy_rings(priv);
1008 err = gve_unregister_qpls(priv);
1011 gve_clear_device_rings_ok(priv);
1013 del_timer_sync(&priv->stats_report_timer);
1015 gve_free_rings(priv);
1016 gve_free_qpls(priv);
1017 priv->interface_down_cnt++;
1021 /* This must have been called from a reset due to the rtnl lock
1022 * so just return at this point.
1024 if (gve_get_reset_in_progress(priv))
1026 /* Otherwise reset before returning */
1027 gve_reset_and_teardown(priv, true);
1028 return gve_reset_recovery(priv, false);
1031 int gve_adjust_queues(struct gve_priv *priv,
1032 struct gve_queue_config new_rx_config,
1033 struct gve_queue_config new_tx_config)
1037 if (netif_carrier_ok(priv->dev)) {
1038 /* To make this process as simple as possible we teardown the
1039 * device, set the new configuration, and then bring the device
1042 err = gve_close(priv->dev);
1043 /* we have already tried to reset in close,
1044 * just fail at this point
1048 priv->tx_cfg = new_tx_config;
1049 priv->rx_cfg = new_rx_config;
1051 err = gve_open(priv->dev);
1057 /* Set the config for the next up. */
1058 priv->tx_cfg = new_tx_config;
1059 priv->rx_cfg = new_rx_config;
1063 netif_err(priv, drv, priv->dev,
1064 "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
1069 static void gve_turndown(struct gve_priv *priv)
1073 if (netif_carrier_ok(priv->dev))
1074 netif_carrier_off(priv->dev);
1076 if (!gve_get_napi_enabled(priv))
1079 /* Disable napi to prevent more work from coming in */
1080 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1081 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1082 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1084 napi_disable(&block->napi);
1086 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1087 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1088 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1090 napi_disable(&block->napi);
1093 /* Stop tx queues */
1094 netif_tx_disable(priv->dev);
1096 gve_clear_napi_enabled(priv);
1097 gve_clear_report_stats(priv);
1100 static void gve_turnup(struct gve_priv *priv)
1104 /* Start the tx queues */
1105 netif_tx_start_all_queues(priv->dev);
1107 /* Enable napi and unmask interrupts for all queues */
1108 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1109 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1110 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1112 napi_enable(&block->napi);
1113 if (gve_is_gqi(priv)) {
1114 iowrite32be(0, gve_irq_doorbell(priv, block));
1116 gve_set_itr_coalesce_usecs_dqo(priv, block,
1117 priv->tx_coalesce_usecs);
1120 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1121 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1122 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1124 napi_enable(&block->napi);
1125 if (gve_is_gqi(priv)) {
1126 iowrite32be(0, gve_irq_doorbell(priv, block));
1128 gve_set_itr_coalesce_usecs_dqo(priv, block,
1129 priv->rx_coalesce_usecs);
1133 gve_set_napi_enabled(priv);
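/* .ndo_tx_timeout handler. Before escalating to a device reset, check
 * whether the NIC has completed descriptors the driver simply has not
 * processed yet (event counter ahead of tx->done); if so, re-kick NAPI for
 * that queue instead, rate-limited by MIN_TX_TIMEOUT_GAP. Only when no
 * missed completions are found is a reset scheduled.
 */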
1136 static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
1138 struct gve_notify_block *block;
1139 struct gve_tx_ring *tx = NULL;
1140 struct gve_priv *priv;
1145 netdev_info(dev, "Timeout on tx queue, %d", txqueue);
1146 priv = netdev_priv(dev);
1147 if (txqueue > priv->tx_cfg.num_queues)
1150 ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
1151 if (ntfy_idx >= priv->num_ntfy_blks)
1154 block = &priv->ntfy_blocks[ntfy_idx];
1157 current_time = jiffies_to_msecs(jiffies);
1158 if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
1161 /* Check to see if there are missed completions, which will allow us to
1164 last_nic_done = gve_tx_load_event_counter(priv, tx);
1165 if (last_nic_done - tx->done) {
1166 netdev_info(dev, "Kicking queue %d", txqueue);
1167 iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
1168 napi_schedule(&block->napi);
1169 tx->last_kick_msec = current_time;
1174 gve_schedule_reset(priv);
1178 tx->queue_timeout++;
1179 priv->tx_timeo_cnt++;
1182 static int gve_set_features(struct net_device *netdev,
1183 netdev_features_t features)
1185 const netdev_features_t orig_features = netdev->features;
1186 struct gve_priv *priv = netdev_priv(netdev);
1189 if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
1190 netdev->features ^= NETIF_F_LRO;
1191 if (netif_carrier_ok(netdev)) {
1192 /* To make this process as simple as possible we
1193 * teardown the device, set the new configuration,
1194 * and then bring the device up again.
1196 err = gve_close(netdev);
1197 /* We have already tried to reset in close, just fail
1203 err = gve_open(netdev);
1211 /* Reverts the change on error. */
1212 netdev->features = orig_features;
1213 netif_err(priv, drv, netdev,
1214 "Set features failed! !!! DISABLING ALL QUEUES !!!\n");
1218 static const struct net_device_ops gve_netdev_ops = {
1219 .ndo_start_xmit = gve_start_xmit,
1220 .ndo_open = gve_open,
1221 .ndo_stop = gve_close,
1222 .ndo_get_stats64 = gve_get_stats,
1223 .ndo_tx_timeout = gve_tx_timeout,
1224 .ndo_set_features = gve_set_features,
1227 static void gve_handle_status(struct gve_priv *priv, u32 status)
1229 if (GVE_DEVICE_STATUS_RESET_MASK & status) {
1230 dev_info(&priv->pdev->dev, "Device requested reset.\n");
1231 gve_set_do_reset(priv);
1233 if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
1234 priv->stats_report_trigger_cnt++;
1235 gve_set_do_report_stats(priv);
1239 static void gve_handle_reset(struct gve_priv *priv)
1241 /* A service task will be scheduled at the end of probe to catch any
1242 * resets that need to happen, and we don't want to reset until
1245 if (gve_get_probe_in_progress(priv))
1248 if (gve_get_do_reset(priv)) {
1250 gve_reset(priv, false);
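/* Fill the DMA-shared stats report: bump written_count so the device can
 * see the report was refreshed, then emit one { stat_name, value, queue_id }
 * record per TX and RX statistic, all in big-endian byte order.
 */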
1255 void gve_handle_report_stats(struct gve_priv *priv)
1257 struct stats *stats = priv->stats_report->stats;
1258 int idx, stats_idx = 0;
1259 unsigned int start = 0;
1262 if (!gve_get_report_stats(priv))
1265 be64_add_cpu(&priv->stats_report->written_count, 1);
1268 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1269 u32 last_completion = 0;
1272 /* DQO doesn't currently support these metrics. */
1273 if (gve_is_gqi(priv)) {
1274 last_completion = priv->tx[idx].done;
1275 tx_frames = priv->tx[idx].req;
1279 start = u64_stats_fetch_begin(&priv->tx[idx].statss);
1280 tx_bytes = priv->tx[idx].bytes_done;
1281 } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
1282 stats[stats_idx++] = (struct stats) {
1283 .stat_name = cpu_to_be32(TX_WAKE_CNT),
1284 .value = cpu_to_be64(priv->tx[idx].wake_queue),
1285 .queue_id = cpu_to_be32(idx),
1287 stats[stats_idx++] = (struct stats) {
1288 .stat_name = cpu_to_be32(TX_STOP_CNT),
1289 .value = cpu_to_be64(priv->tx[idx].stop_queue),
1290 .queue_id = cpu_to_be32(idx),
1292 stats[stats_idx++] = (struct stats) {
1293 .stat_name = cpu_to_be32(TX_FRAMES_SENT),
1294 .value = cpu_to_be64(tx_frames),
1295 .queue_id = cpu_to_be32(idx),
1297 stats[stats_idx++] = (struct stats) {
1298 .stat_name = cpu_to_be32(TX_BYTES_SENT),
1299 .value = cpu_to_be64(tx_bytes),
1300 .queue_id = cpu_to_be32(idx),
1302 stats[stats_idx++] = (struct stats) {
1303 .stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
1304 .value = cpu_to_be64(last_completion),
1305 .queue_id = cpu_to_be32(idx),
1307 stats[stats_idx++] = (struct stats) {
1308 .stat_name = cpu_to_be32(TX_TIMEOUT_CNT),
1309 .value = cpu_to_be64(priv->tx[idx].queue_timeout),
1310 .queue_id = cpu_to_be32(idx),
1316 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1317 stats[stats_idx++] = (struct stats) {
1318 .stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
1319 .value = cpu_to_be64(priv->rx[idx].desc.seqno),
1320 .queue_id = cpu_to_be32(idx),
1322 stats[stats_idx++] = (struct stats) {
1323 .stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
1324 .value = cpu_to_be64(priv->rx[idx].fill_cnt),
1325 .queue_id = cpu_to_be32(idx),
1331 static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
1333 if (!gve_get_napi_enabled(priv))
1336 if (link_status == netif_carrier_ok(priv->dev))
1340 netdev_info(priv->dev, "Device link is up.\n");
1341 netif_carrier_on(priv->dev);
1343 netdev_info(priv->dev, "Device link is down.\n");
1344 netif_carrier_off(priv->dev);
1348 /* Handle NIC status register changes, reset requests and report stats */
1349 static void gve_service_task(struct work_struct *work)
1351 struct gve_priv *priv = container_of(work, struct gve_priv,
1353 u32 status = ioread32be(&priv->reg_bar0->device_status);
1355 gve_handle_status(priv, status);
1357 gve_handle_reset(priv);
1358 gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
1361 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
1366 /* Set up the adminq */
1367 err = gve_adminq_alloc(&priv->pdev->dev, priv);
1369 dev_err(&priv->pdev->dev,
1370 "Failed to alloc admin queue: err=%d\n", err);
1374 if (skip_describe_device)
1377 priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
1378 /* Get the initial information we need from the device */
1379 err = gve_adminq_describe_device(priv);
1381 dev_err(&priv->pdev->dev,
1382 "Could not get device information: err=%d\n", err);
1385 priv->dev->mtu = priv->dev->max_mtu;
1386 num_ntfy = pci_msix_vec_count(priv->pdev);
1387 if (num_ntfy <= 0) {
1388 dev_err(&priv->pdev->dev,
1389 "could not count MSI-x vectors: err=%d\n", num_ntfy);
1392 } else if (num_ntfy < GVE_MIN_MSIX) {
1393 dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
1394 GVE_MIN_MSIX, num_ntfy);
1399 priv->num_registered_pages = 0;
1400 priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
1401 /* gvnic has one Notification Block per MSI-x vector, except for the
1404 priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
1405 priv->mgmt_msix_idx = priv->num_ntfy_blks;
1407 priv->tx_cfg.max_queues =
1408 min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
1409 priv->rx_cfg.max_queues =
1410 min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);
1412 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
1413 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
1414 if (priv->default_num_queues > 0) {
1415 priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
1416 priv->tx_cfg.num_queues);
1417 priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
1418 priv->rx_cfg.num_queues);
1421 dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
1422 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
1423 dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
1424 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
1426 if (!gve_is_gqi(priv)) {
1427 priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO;
1428 priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
1432 err = gve_setup_device_resources(priv);
1436 gve_adminq_free(&priv->pdev->dev, priv);
1440 static void gve_teardown_priv_resources(struct gve_priv *priv)
1442 gve_teardown_device_resources(priv);
1443 gve_adminq_free(&priv->pdev->dev, priv);
1446 static void gve_trigger_reset(struct gve_priv *priv)
1448 /* Reset the device by releasing the AQ */
1449 gve_adminq_release(priv);
1452 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
1454 gve_trigger_reset(priv);
1455 /* With the reset having already happened, close cannot fail */
1457 gve_close(priv->dev);
1458 gve_teardown_priv_resources(priv);
1461 static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
1465 err = gve_init_priv(priv, true);
1469 err = gve_open(priv->dev);
1475 dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
1480 int gve_reset(struct gve_priv *priv, bool attempt_teardown)
1482 bool was_up = netif_carrier_ok(priv->dev);
1485 dev_info(&priv->pdev->dev, "Performing reset\n");
1486 gve_clear_do_reset(priv);
1487 gve_set_reset_in_progress(priv);
1488 /* If we aren't attempting to teardown normally, just go turndown and
1491 if (!attempt_teardown) {
1493 gve_reset_and_teardown(priv, was_up);
1495 /* Otherwise attempt to close normally */
1497 err = gve_close(priv->dev);
1498 /* If that fails reset as we did above */
1500 gve_reset_and_teardown(priv, was_up);
1502 /* Clean up any remaining resources */
1503 gve_teardown_priv_resources(priv);
1506 /* Set it all back up */
1507 err = gve_reset_recovery(priv, was_up);
1508 gve_clear_reset_in_progress(priv);
1510 priv->interface_up_cnt = 0;
1511 priv->interface_down_cnt = 0;
1512 priv->stats_report_trigger_cnt = 0;
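/* The driver version is written to the device as a raw byte stream: the
 * "GVE-" prefix (gve_version_prefix), then gve_version_str, then a trailing
 * newline, one writeb() at a time into the driver_version register.
 */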
1516 static void gve_write_version(u8 __iomem *driver_version_register)
1518 const char *c = gve_version_prefix;
1521 writeb(*c, driver_version_register);
1525 c = gve_version_str;
1527 writeb(*c, driver_version_register);
1530 writeb('\n', driver_version_register);
1533 static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1535 int max_tx_queues, max_rx_queues;
1536 struct net_device *dev;
1537 __be32 __iomem *db_bar;
1538 struct gve_registers __iomem *reg_bar;
1539 struct gve_priv *priv;
1542 err = pci_enable_device(pdev);
1546 err = pci_request_regions(pdev, "gvnic-cfg");
1548 goto abort_with_enabled;
1550 pci_set_master(pdev);
1552 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1554 dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
1555 goto abort_with_pci_region;
1558 reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
1560 dev_err(&pdev->dev, "Failed to map pci bar!\n");
1562 goto abort_with_pci_region;
1565 db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
1567 dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
1569 goto abort_with_reg_bar;
1572 gve_write_version(&reg_bar->driver_version);
1573 /* Get max queues to alloc etherdev */
1574 max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
1575 max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
1576 /* Alloc and setup the netdev and priv */
1577 dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
1579 dev_err(&pdev->dev, "could not allocate netdev\n");
1581 goto abort_with_db_bar;
1583 SET_NETDEV_DEV(dev, &pdev->dev);
1584 pci_set_drvdata(pdev, dev);
1585 dev->ethtool_ops = &gve_ethtool_ops;
1586 dev->netdev_ops = &gve_netdev_ops;
1588 /* Set default and supported features.
1590 * Features might be set in other locations as well (such as
1591 * `gve_adminq_describe_device`).
1593 dev->hw_features = NETIF_F_HIGHDMA;
1594 dev->hw_features |= NETIF_F_SG;
1595 dev->hw_features |= NETIF_F_HW_CSUM;
1596 dev->hw_features |= NETIF_F_TSO;
1597 dev->hw_features |= NETIF_F_TSO6;
1598 dev->hw_features |= NETIF_F_TSO_ECN;
1599 dev->hw_features |= NETIF_F_RXCSUM;
1600 dev->hw_features |= NETIF_F_RXHASH;
1601 dev->features = dev->hw_features;
1602 dev->watchdog_timeo = 5 * HZ;
1603 dev->min_mtu = ETH_MIN_MTU;
1604 netif_carrier_off(dev);
1606 priv = netdev_priv(dev);
1609 priv->msg_enable = DEFAULT_MSG_LEVEL;
1610 priv->reg_bar0 = reg_bar;
1611 priv->db_bar2 = db_bar;
1612 priv->service_task_flags = 0x0;
1613 priv->state_flags = 0x0;
1614 priv->ethtool_flags = 0x0;
1616 gve_set_probe_in_progress(priv);
1617 priv->gve_wq = alloc_ordered_workqueue("gve", 0);
1618 if (!priv->gve_wq) {
1619 dev_err(&pdev->dev, "Could not allocate workqueue");
1621 goto abort_with_netdev;
1623 INIT_WORK(&priv->service_task, gve_service_task);
1624 INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
1625 priv->tx_cfg.max_queues = max_tx_queues;
1626 priv->rx_cfg.max_queues = max_rx_queues;
1628 err = gve_init_priv(priv, false);
1632 err = register_netdev(dev);
1634 goto abort_with_gve_init;
1636 dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
1637 dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
1638 gve_clear_probe_in_progress(priv);
1639 queue_work(priv->gve_wq, &priv->service_task);
1642 abort_with_gve_init:
1643 gve_teardown_priv_resources(priv);
1646 destroy_workqueue(priv->gve_wq);
1652 pci_iounmap(pdev, db_bar);
1655 pci_iounmap(pdev, reg_bar);
1657 abort_with_pci_region:
1658 pci_release_regions(pdev);
1661 pci_disable_device(pdev);
1665 static void gve_remove(struct pci_dev *pdev)
1667 struct net_device *netdev = pci_get_drvdata(pdev);
1668 struct gve_priv *priv = netdev_priv(netdev);
1669 __be32 __iomem *db_bar = priv->db_bar2;
1670 void __iomem *reg_bar = priv->reg_bar0;
1672 unregister_netdev(netdev);
1673 gve_teardown_priv_resources(priv);
1674 destroy_workqueue(priv->gve_wq);
1675 free_netdev(netdev);
1676 pci_iounmap(pdev, db_bar);
1677 pci_iounmap(pdev, reg_bar);
1678 pci_release_regions(pdev);
1679 pci_disable_device(pdev);
1682 static void gve_shutdown(struct pci_dev *pdev)
1684 struct net_device *netdev = pci_get_drvdata(pdev);
1685 struct gve_priv *priv = netdev_priv(netdev);
1686 bool was_up = netif_carrier_ok(priv->dev);
1689 if (was_up && gve_close(priv->dev)) {
1690 /* If the dev was up, attempt to close, if close fails, reset */
1691 gve_reset_and_teardown(priv, was_up);
1693 /* If the dev wasn't up or close worked, finish tearing down */
1694 gve_teardown_priv_resources(priv);
1700 static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
1702 struct net_device *netdev = pci_get_drvdata(pdev);
1703 struct gve_priv *priv = netdev_priv(netdev);
1704 bool was_up = netif_carrier_ok(priv->dev);
1706 priv->suspend_cnt++;
1708 if (was_up && gve_close(priv->dev)) {
1709 /* If the dev was up, attempt to close, if close fails, reset */
1710 gve_reset_and_teardown(priv, was_up);
1712 /* If the dev wasn't up or close worked, finish tearing down */
1713 gve_teardown_priv_resources(priv);
1715 priv->up_before_suspend = was_up;
1720 static int gve_resume(struct pci_dev *pdev)
1722 struct net_device *netdev = pci_get_drvdata(pdev);
1723 struct gve_priv *priv = netdev_priv(netdev);
1728 err = gve_reset_recovery(priv, priv->up_before_suspend);
1732 #endif /* CONFIG_PM */
1734 static const struct pci_device_id gve_id_table[] = {
1735 { PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
1739 static struct pci_driver gvnic_driver = {
1741 .id_table = gve_id_table,
1743 .remove = gve_remove,
1744 .shutdown = gve_shutdown,
1746 .suspend = gve_suspend,
1747 .resume = gve_resume,
1751 module_pci_driver(gvnic_driver);
1753 MODULE_DEVICE_TABLE(pci, gve_id_table);
1754 MODULE_AUTHOR("Google, Inc.");
1755 MODULE_DESCRIPTION("gVNIC Driver");
1756 MODULE_LICENSE("Dual MIT/GPL");
1757 MODULE_VERSION(GVE_VERSION);