// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_register.h"

#define GVE_DEFAULT_RX_COPYBREAK        (256)

#define DEFAULT_MSG_LEVEL       (NETIF_MSG_DRV | NETIF_MSG_LINK)
#define GVE_VERSION             "1.0.0"
#define GVE_VERSION_PREFIX      "GVE-"

const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;

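/* ndo_get_stats64 handler: fold the per-ring packet/byte counters into the
 * netdev stats under each ring's u64_stats syncp so readers get a consistent
 * snapshot without taking locks in the data path.
 */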
static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
{
        struct gve_priv *priv = netdev_priv(dev);
        unsigned int start;
        int ring;

        if (priv->rx) {
                for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
                        do {
                                start = u64_stats_fetch_begin(&priv->rx[ring].statss);
                                s->rx_packets += priv->rx[ring].rpackets;
                                s->rx_bytes += priv->rx[ring].rbytes;
                        } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
                                                       start));
                }
        }
        if (priv->tx) {
                for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
                        do {
                                start = u64_stats_fetch_begin(&priv->tx[ring].statss);
                                s->tx_packets += priv->tx[ring].pkt_done;
                                s->tx_bytes += priv->tx[ring].bytes_done;
                        } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
                                                       start));
                }
        }
}

static int gve_alloc_counter_array(struct gve_priv *priv)
{
        priv->counter_array =
                dma_alloc_coherent(&priv->pdev->dev,
                                   priv->num_event_counters *
                                   sizeof(*priv->counter_array),
                                   &priv->counter_array_bus, GFP_KERNEL);
        if (!priv->counter_array)
                return -ENOMEM;

        return 0;
}

static void gve_free_counter_array(struct gve_priv *priv)
{
        dma_free_coherent(&priv->pdev->dev,
                          priv->num_event_counters *
                          sizeof(*priv->counter_array),
                          priv->counter_array, priv->counter_array_bus);
        priv->counter_array = NULL;
}

/* NIC requests to report stats */
static void gve_stats_report_task(struct work_struct *work)
{
        struct gve_priv *priv = container_of(work, struct gve_priv,
                                             stats_report_task);
        if (gve_get_do_report_stats(priv)) {
                gve_handle_report_stats(priv);
                gve_clear_do_report_stats(priv);
        }
}

static void gve_stats_report_schedule(struct gve_priv *priv)
{
        if (!gve_get_probe_in_progress(priv) &&
            !gve_get_reset_in_progress(priv)) {
                gve_set_do_report_stats(priv);
                queue_work(priv->gve_wq, &priv->stats_report_task);
        }
}

static void gve_stats_report_timer(struct timer_list *t)
{
        struct gve_priv *priv = from_timer(priv, t, stats_report_timer);

        mod_timer(&priv->stats_report_timer,
                  round_jiffies(jiffies +
                  msecs_to_jiffies(priv->stats_report_timer_period)));
        gve_stats_report_schedule(priv);
}

static int gve_alloc_stats_report(struct gve_priv *priv)
{
        int tx_stats_num, rx_stats_num;

        tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
                       priv->tx_cfg.num_queues;
        rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
                       priv->rx_cfg.num_queues;
        priv->stats_report_len = struct_size(priv->stats_report, stats,
                                             tx_stats_num + rx_stats_num);
        priv->stats_report =
                dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
                                   &priv->stats_report_bus, GFP_KERNEL);
        if (!priv->stats_report)
                return -ENOMEM;
        /* Set up timer for the report-stats task */
        timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
        priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
        return 0;
}

static void gve_free_stats_report(struct gve_priv *priv)
{
        del_timer_sync(&priv->stats_report_timer);
        dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
                          priv->stats_report, priv->stats_report_bus);
        priv->stats_report = NULL;
}

static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
{
        struct gve_priv *priv = arg;

        queue_work(priv->gve_wq, &priv->service_task);
        return IRQ_HANDLED;
}

static irqreturn_t gve_intr(int irq, void *arg)
{
        struct gve_notify_block *block = arg;
        struct gve_priv *priv = block->priv;

        iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
        napi_schedule_irqoff(&block->napi);
        return IRQ_HANDLED;
}

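/* NAPI handler shared by TX and RX blocks: drain work up to the budget, then
 * ack and unmask the block's interrupt through its doorbell, re-poll once to
 * catch work that raced with the unmask, and re-mask/reschedule if anything
 * is left.
 */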
static int gve_napi_poll(struct napi_struct *napi, int budget)
{
        struct gve_notify_block *block;
        __be32 __iomem *irq_doorbell;
        bool reschedule = false;
        struct gve_priv *priv;

        block = container_of(napi, struct gve_notify_block, napi);
        priv = block->priv;

        if (block->tx)
                reschedule |= gve_tx_poll(block, budget);
        if (block->rx)
                reschedule |= gve_rx_poll(block, budget);

        if (reschedule)
                return budget;

        napi_complete(napi);
        irq_doorbell = gve_irq_doorbell(priv, block);
        iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);

        /* Double check we have no extra work.
         * Ensure unmask synchronizes with checking for work.
         */
        dma_rmb();
        if (block->tx)
                reschedule |= gve_tx_poll(block, -1);
        if (block->rx)
                reschedule |= gve_rx_poll(block, -1);
        if (reschedule && napi_reschedule(napi))
                iowrite32be(GVE_IRQ_MASK, irq_doorbell);

        return 0;
}

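/* gvnic exposes one notification block per data MSI-X vector plus a final
 * management vector. If fewer vectors are granted than requested, the TX/RX
 * queue maximums are scaled down to fit what was actually enabled.
 */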
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
        int num_vecs_requested = priv->num_ntfy_blks + 1;
        char *name = priv->dev->name;
        unsigned int active_cpus;
        int vecs_enabled;
        int i, j;
        int err;

        priv->msix_vectors = kvzalloc(num_vecs_requested *
                                      sizeof(*priv->msix_vectors), GFP_KERNEL);
        if (!priv->msix_vectors)
                return -ENOMEM;
        for (i = 0; i < num_vecs_requested; i++)
                priv->msix_vectors[i].entry = i;
        vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
                                             GVE_MIN_MSIX, num_vecs_requested);
        if (vecs_enabled < 0) {
                dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
                        GVE_MIN_MSIX, vecs_enabled);
                err = vecs_enabled;
                goto abort_with_msix_vectors;
        }
        if (vecs_enabled != num_vecs_requested) {
                int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
                int vecs_per_type = new_num_ntfy_blks / 2;
                int vecs_left = new_num_ntfy_blks % 2;

                priv->num_ntfy_blks = new_num_ntfy_blks;
                priv->mgmt_msix_idx = priv->num_ntfy_blks;
                priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
                                                vecs_per_type);
                priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
                                                vecs_per_type + vecs_left);
                dev_err(&priv->pdev->dev,
                        "Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
                        vecs_enabled, priv->tx_cfg.max_queues,
                        priv->rx_cfg.max_queues);
                if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
                        priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
                if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
                        priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
        }
        /* Half the notification blocks go to TX and half to RX */
        active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());

        /* Setup Management Vector - the last vector */
        snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
                 name);
        err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
                          gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
        if (err) {
                dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
                goto abort_with_msix_enabled;
        }
        priv->ntfy_blocks =
                dma_alloc_coherent(&priv->pdev->dev,
                                   priv->num_ntfy_blks *
                                   sizeof(*priv->ntfy_blocks),
                                   &priv->ntfy_block_bus, GFP_KERNEL);
        if (!priv->ntfy_blocks) {
                err = -ENOMEM;
                goto abort_with_mgmt_vector;
        }
        /* Setup the other blocks - the first n-1 vectors */
        for (i = 0; i < priv->num_ntfy_blks; i++) {
                struct gve_notify_block *block = &priv->ntfy_blocks[i];
                int msix_idx = i;

                snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
                         name, i);
                block->priv = priv;
                err = request_irq(priv->msix_vectors[msix_idx].vector,
                                  gve_intr, 0, block->name, block);
                if (err) {
                        dev_err(&priv->pdev->dev,
                                "Failed to receive msix vector %d\n", i);
                        goto abort_with_some_ntfy_blocks;
                }
                irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
                                      get_cpu_mask(i % active_cpus));
        }
        return 0;
abort_with_some_ntfy_blocks:
        for (j = 0; j < i; j++) {
                struct gve_notify_block *block = &priv->ntfy_blocks[j];
                int msix_idx = j;

                irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
                                      NULL);
                free_irq(priv->msix_vectors[msix_idx].vector, block);
        }
        dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
                          sizeof(*priv->ntfy_blocks),
                          priv->ntfy_blocks, priv->ntfy_block_bus);
        priv->ntfy_blocks = NULL;
abort_with_mgmt_vector:
        free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
abort_with_msix_enabled:
        pci_disable_msix(priv->pdev);
abort_with_msix_vectors:
        kvfree(priv->msix_vectors);
        priv->msix_vectors = NULL;
        return err;
}

static void gve_free_notify_blocks(struct gve_priv *priv)
{
        int i;

        if (priv->msix_vectors) {
                /* Free the irqs */
                for (i = 0; i < priv->num_ntfy_blks; i++) {
                        struct gve_notify_block *block = &priv->ntfy_blocks[i];
                        int msix_idx = i;

                        irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
                                              NULL);
                        free_irq(priv->msix_vectors[msix_idx].vector, block);
                }
                free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
        }
        dma_free_coherent(&priv->pdev->dev,
                          priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
                          priv->ntfy_blocks, priv->ntfy_block_bus);
        priv->ntfy_blocks = NULL;
        pci_disable_msix(priv->pdev);
        kvfree(priv->msix_vectors);
        priv->msix_vectors = NULL;
}

static int gve_setup_device_resources(struct gve_priv *priv)
{
        int err;

        err = gve_alloc_counter_array(priv);
        if (err)
                return err;
        err = gve_alloc_notify_blocks(priv);
        if (err)
                goto abort_with_counter;
        err = gve_alloc_stats_report(priv);
        if (err)
                goto abort_with_ntfy_blocks;
        err = gve_adminq_configure_device_resources(priv,
                                                    priv->counter_array_bus,
                                                    priv->num_event_counters,
                                                    priv->ntfy_block_bus,
                                                    priv->num_ntfy_blks);
        if (unlikely(err)) {
                dev_err(&priv->pdev->dev,
                        "could not setup device_resources: err=%d\n", err);
                err = -ENXIO;
                goto abort_with_stats_report;
        }
        err = gve_adminq_report_stats(priv, priv->stats_report_len,
                                      priv->stats_report_bus,
                                      GVE_STATS_REPORT_TIMER_PERIOD);
        if (err)
                dev_err(&priv->pdev->dev,
                        "Failed to report stats: err=%d\n", err);
        gve_set_device_resources_ok(priv);
        return 0;
abort_with_stats_report:
        gve_free_stats_report(priv);
abort_with_ntfy_blocks:
        gve_free_notify_blocks(priv);
abort_with_counter:
        gve_free_counter_array(priv);
        return err;
}

static void gve_trigger_reset(struct gve_priv *priv);

static void gve_teardown_device_resources(struct gve_priv *priv)
{
        int err;

        /* Tell device its resources are being freed */
        if (gve_get_device_resources_ok(priv)) {
                /* detach the stats report */
                err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
                if (err) {
                        dev_err(&priv->pdev->dev,
                                "Failed to detach stats report: err=%d\n", err);
                        gve_trigger_reset(priv);
                }
                err = gve_adminq_deconfigure_device_resources(priv);
                if (err) {
                        dev_err(&priv->pdev->dev,
                                "Could not deconfigure device resources: err=%d\n",
                                err);
                        gve_trigger_reset(priv);
                }
        }
        gve_free_counter_array(priv);
        gve_free_notify_blocks(priv);
        gve_free_stats_report(priv);
        gve_clear_device_resources_ok(priv);
}

static void gve_add_napi(struct gve_priv *priv, int ntfy_idx)
{
        struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

        netif_napi_add(priv->dev, &block->napi, gve_napi_poll,
                       NAPI_POLL_WEIGHT);
}

static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
{
        struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

        netif_napi_del(&block->napi);
}

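/* Queue page lists (QPLs) are the pre-registered sets of DMA pages the device
 * copies packet data into and out of when raw addressing is not in use; each
 * queue's QPL must be registered with the device before the queue is created.
 */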
static int gve_register_qpls(struct gve_priv *priv)
{
        int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
        int err;
        int i;

        for (i = 0; i < num_qpls; i++) {
                err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
                if (err) {
                        netif_err(priv, drv, priv->dev,
                                  "failed to register queue page list %d\n",
                                  priv->qpls[i].id);
                        /* This failure will trigger a reset - no need to clean
                         * up
                         */
                        return err;
                }
        }
        return 0;
}

static int gve_unregister_qpls(struct gve_priv *priv)
{
        int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
        int err;
        int i;

        for (i = 0; i < num_qpls; i++) {
                err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
                /* This failure will trigger a reset - no need to clean up */
                if (err) {
                        netif_err(priv, drv, priv->dev,
                                  "Failed to unregister queue page list %d\n",
                                  priv->qpls[i].id);
                        return err;
                }
        }
        return 0;
}

static int gve_create_rings(struct gve_priv *priv)
{
        int err;
        int i;

        err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
        if (err) {
                netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
                          priv->tx_cfg.num_queues);
                /* This failure will trigger a reset - no need to clean
                 * up
                 */
                return err;
        }
        netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
                  priv->tx_cfg.num_queues);

        err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
        if (err) {
                netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
                          priv->rx_cfg.num_queues);
                /* This failure will trigger a reset - no need to clean
                 * up
                 */
                return err;
        }
        netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
                  priv->rx_cfg.num_queues);

        /* Rx data ring has been prefilled with packet buffers at queue
         * allocation time.
         * Write the doorbell to provide descriptor slots and packet buffers
         * to the NIC.
         */
        for (i = 0; i < priv->rx_cfg.num_queues; i++)
                gve_rx_write_doorbell(priv, &priv->rx[i]);

        return 0;
}

static int gve_alloc_rings(struct gve_priv *priv)
{
        int ntfy_idx;
        int err;
        int i;

        /* Setup tx rings */
        priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
                            GFP_KERNEL);
        if (!priv->tx)
                return -ENOMEM;
        err = gve_tx_alloc_rings(priv);
        if (err)
                goto free_tx;
        /* Setup rx rings */
        priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
                            GFP_KERNEL);
        if (!priv->rx) {
                err = -ENOMEM;
                goto free_tx_queue;
        }
        err = gve_rx_alloc_rings(priv);
        if (err)
                goto free_rx;
        /* Add tx napi & init sync stats*/
        for (i = 0; i < priv->tx_cfg.num_queues; i++) {
                u64_stats_init(&priv->tx[i].statss);
                ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
                gve_add_napi(priv, ntfy_idx);
        }
        /* Add rx napi & init sync stats*/
        for (i = 0; i < priv->rx_cfg.num_queues; i++) {
                u64_stats_init(&priv->rx[i].statss);
                ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
                gve_add_napi(priv, ntfy_idx);
        }

        return 0;

free_rx:
        kvfree(priv->rx);
        priv->rx = NULL;
free_tx_queue:
        gve_tx_free_rings(priv);
free_tx:
        kvfree(priv->tx);
        priv->tx = NULL;
        return err;
}

static int gve_destroy_rings(struct gve_priv *priv)
{
        int err;

        err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
        if (err) {
                netif_err(priv, drv, priv->dev,
                          "failed to destroy tx queues\n");
                /* This failure will trigger a reset - no need to clean up */
                return err;
        }
        netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
        err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
        if (err) {
                netif_err(priv, drv, priv->dev,
                          "failed to destroy rx queues\n");
                /* This failure will trigger a reset - no need to clean up */
                return err;
        }
        netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
        return 0;
}

static void gve_free_rings(struct gve_priv *priv)
{
        int ntfy_idx;
        int i;

        if (priv->tx) {
                for (i = 0; i < priv->tx_cfg.num_queues; i++) {
                        ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
                        gve_remove_napi(priv, ntfy_idx);
                }
                gve_tx_free_rings(priv);
                kvfree(priv->tx);
                priv->tx = NULL;
        }
        if (priv->rx) {
                for (i = 0; i < priv->rx_cfg.num_queues; i++) {
                        ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
                        gve_remove_napi(priv, ntfy_idx);
                }
                gve_rx_free_rings(priv);
                kvfree(priv->rx);
                priv->rx = NULL;
        }
}

int gve_alloc_page(struct gve_priv *priv, struct device *dev,
                   struct page **page, dma_addr_t *dma,
                   enum dma_data_direction dir)
{
        *page = alloc_page(GFP_KERNEL);
        if (!*page) {
                priv->page_alloc_fail++;
                return -ENOMEM;
        }
        *dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
        if (dma_mapping_error(dev, *dma)) {
                priv->dma_mapping_error++;
                put_page(*page);
                return -ENOMEM;
        }
        return 0;
}

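/* Allocate and DMA-map the pages backing one queue page list, respecting the
 * device's limit on the total number of registered pages.
 */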
static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
                                     int pages)
{
        struct gve_queue_page_list *qpl = &priv->qpls[id];
        int err;
        int i;

        if (pages + priv->num_registered_pages > priv->max_registered_pages) {
                netif_err(priv, drv, priv->dev,
                          "Reached max number of registered pages %llu > %llu\n",
                          pages + priv->num_registered_pages,
                          priv->max_registered_pages);
                return -EINVAL;
        }

        qpl->id = id;
        qpl->num_entries = 0;
        qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
        /* caller handles clean up */
        if (!qpl->pages)
                return -ENOMEM;
        qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),
                                   GFP_KERNEL);
        /* caller handles clean up */
        if (!qpl->page_buses)
                return -ENOMEM;

        for (i = 0; i < pages; i++) {
                err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
                                     &qpl->page_buses[i],
                                     gve_qpl_dma_dir(priv, id));
                /* caller handles clean up */
                if (err)
                        return -ENOMEM;
                qpl->num_entries++;
        }
        priv->num_registered_pages += pages;

        return 0;
}

void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
                   enum dma_data_direction dir)
{
        if (!dma_mapping_error(dev, dma))
                dma_unmap_page(dev, dma, PAGE_SIZE, dir);
        if (page)
                put_page(page);
}

static void gve_free_queue_page_list(struct gve_priv *priv,
                                     int id)
{
        struct gve_queue_page_list *qpl = &priv->qpls[id];
        int i;

        if (!qpl->pages)
                return;
        if (!qpl->page_buses)
                goto free_pages;

        for (i = 0; i < qpl->num_entries; i++)
                gve_free_page(&priv->pdev->dev, qpl->pages[i],
                              qpl->page_buses[i], gve_qpl_dma_dir(priv, id));

        kvfree(qpl->page_buses);
free_pages:
        kvfree(qpl->pages);
        priv->num_registered_pages -= qpl->num_entries;
}

static int gve_alloc_qpls(struct gve_priv *priv)
{
        int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
        int i, j;
        int err;

        /* Raw addressing means no QPLs */
        if (priv->raw_addressing)
                return 0;

        priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
        if (!priv->qpls)
                return -ENOMEM;

        for (i = 0; i < gve_num_tx_qpls(priv); i++) {
                err = gve_alloc_queue_page_list(priv, i,
                                                priv->tx_pages_per_qpl);
                if (err)
                        goto free_qpls;
        }
        for (; i < num_qpls; i++) {
                err = gve_alloc_queue_page_list(priv, i,
                                                priv->rx_data_slot_cnt);
                if (err)
                        goto free_qpls;
        }

        priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
                                     sizeof(unsigned long) * BITS_PER_BYTE;
        priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) *
                                            sizeof(unsigned long), GFP_KERNEL);
        if (!priv->qpl_cfg.qpl_id_map) {
                err = -ENOMEM;
                goto free_qpls;
        }

        return 0;

free_qpls:
        for (j = 0; j <= i; j++)
                gve_free_queue_page_list(priv, j);
        kvfree(priv->qpls);
        return err;
}

static void gve_free_qpls(struct gve_priv *priv)
{
        int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
        int i;

        /* Raw addressing means no QPLs */
        if (priv->raw_addressing)
                return;

        kvfree(priv->qpl_cfg.qpl_id_map);

        for (i = 0; i < num_qpls; i++)
                gve_free_queue_page_list(priv, i);

        kvfree(priv->qpls);
}

/* Use this to schedule a reset when the device is capable of continuing
 * to handle other requests in its current state. If it is not, do a reset
 * in thread instead.
 */
void gve_schedule_reset(struct gve_priv *priv)
{
        gve_set_do_reset(priv);
        queue_work(priv->gve_wq, &priv->service_task);
}

static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
static void gve_turndown(struct gve_priv *priv);
static void gve_turnup(struct gve_priv *priv);

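/* ndo_open: allocate QPLs and rings, size the real TX/RX queue counts,
 * register the QPLs and create the queues on the device, then turn the data
 * path up. Failures after the device has been touched fall through to a
 * reset rather than ordinary unwinding.
 */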
static int gve_open(struct net_device *dev)
{
        struct gve_priv *priv = netdev_priv(dev);
        int err;

        err = gve_alloc_qpls(priv);
        if (err)
                return err;
        err = gve_alloc_rings(priv);
        if (err)
                goto free_qpls;

        err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
        if (err)
                goto free_rings;
        err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
        if (err)
                goto free_rings;

        err = gve_register_qpls(priv);
        if (err)
                goto reset;
        err = gve_create_rings(priv);
        if (err)
                goto reset;
        gve_set_device_rings_ok(priv);

        if (gve_get_report_stats(priv))
                mod_timer(&priv->stats_report_timer,
                          round_jiffies(jiffies +
                                msecs_to_jiffies(priv->stats_report_timer_period)));

        gve_turnup(priv);
        queue_work(priv->gve_wq, &priv->service_task);
        priv->interface_up_cnt++;
        return 0;

free_rings:
        gve_free_rings(priv);
free_qpls:
        gve_free_qpls(priv);
        return err;

reset:
        /* This must have been called from a reset due to the rtnl lock
         * so just return at this point.
         */
        if (gve_get_reset_in_progress(priv))
                return err;
        /* Otherwise reset before returning */
        gve_reset_and_teardown(priv, true);
        /* if this fails there is nothing we can do so just ignore the return */
        gve_reset_recovery(priv, false);
        /* return the original error */
        return err;
}

static int gve_close(struct net_device *dev)
{
        struct gve_priv *priv = netdev_priv(dev);
        int err;

        netif_carrier_off(dev);
        if (gve_get_device_rings_ok(priv)) {
                gve_turndown(priv);
                err = gve_destroy_rings(priv);
                if (err)
                        goto err;
                err = gve_unregister_qpls(priv);
                if (err)
                        goto err;
                gve_clear_device_rings_ok(priv);
        }
        del_timer_sync(&priv->stats_report_timer);

        gve_free_rings(priv);
        gve_free_qpls(priv);
        priv->interface_down_cnt++;
        return 0;

err:
        /* This must have been called from a reset due to the rtnl lock
         * so just return at this point.
         */
        if (gve_get_reset_in_progress(priv))
                return err;
        /* Otherwise reset before returning */
        gve_reset_and_teardown(priv, true);
        return gve_reset_recovery(priv, false);
}

int gve_adjust_queues(struct gve_priv *priv,
                      struct gve_queue_config new_rx_config,
                      struct gve_queue_config new_tx_config)
{
        int err;

        if (netif_carrier_ok(priv->dev)) {
                /* To make this process as simple as possible we teardown the
                 * device, set the new configuration, and then bring the device
                 * up again.
                 */
                err = gve_close(priv->dev);
                /* we have already tried to reset in close,
                 * just fail at this point
                 */
                if (err)
                        return err;
                priv->tx_cfg = new_tx_config;
                priv->rx_cfg = new_rx_config;

                err = gve_open(priv->dev);
                if (err)
                        goto err;

                return 0;
        }
        /* Set the config for the next up. */
        priv->tx_cfg = new_tx_config;
        priv->rx_cfg = new_rx_config;

        return 0;
err:
        netif_err(priv, drv, priv->dev,
                  "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
        gve_turndown(priv);
        return err;
}

static void gve_turndown(struct gve_priv *priv)
{
        int idx;

        if (netif_carrier_ok(priv->dev))
                netif_carrier_off(priv->dev);

        if (!gve_get_napi_enabled(priv))
                return;

        /* Disable napi to prevent more work from coming in */
        for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
                int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
                struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

                napi_disable(&block->napi);
        }
        for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
                int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
                struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

                napi_disable(&block->napi);
        }

        /* Stop tx queues */
        netif_tx_disable(priv->dev);

        gve_clear_napi_enabled(priv);
        gve_clear_report_stats(priv);
}

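/* Bring the data path back up: restart the TX queues, re-enable NAPI and
 * unmask each block's interrupt by writing 0 to its doorbell.
 */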
static void gve_turnup(struct gve_priv *priv)
{
        int idx;

        /* Start the tx queues */
        netif_tx_start_all_queues(priv->dev);

        /* Enable napi and unmask interrupts for all queues */
        for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
                int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
                struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

                napi_enable(&block->napi);
                iowrite32be(0, gve_irq_doorbell(priv, block));
        }
        for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
                int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
                struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

                napi_enable(&block->napi);
                iowrite32be(0, gve_irq_doorbell(priv, block));
        }

        gve_set_napi_enabled(priv);
}

static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        struct gve_priv *priv = netdev_priv(dev);

        gve_schedule_reset(priv);
        priv->tx_timeo_cnt++;
}

static const struct net_device_ops gve_netdev_ops = {
        .ndo_start_xmit         =       gve_tx,
        .ndo_open               =       gve_open,
        .ndo_stop               =       gve_close,
        .ndo_get_stats64        =       gve_get_stats,
        .ndo_tx_timeout         =       gve_tx_timeout,
};

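/* Translate device status register bits into service-task work flags. */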
static void gve_handle_status(struct gve_priv *priv, u32 status)
{
        if (GVE_DEVICE_STATUS_RESET_MASK & status) {
                dev_info(&priv->pdev->dev, "Device requested reset.\n");
                gve_set_do_reset(priv);
        }
        if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
                priv->stats_report_trigger_cnt++;
                gve_set_do_report_stats(priv);
        }
}

static void gve_handle_reset(struct gve_priv *priv)
{
        /* A service task will be scheduled at the end of probe to catch any
         * resets that need to happen, and we don't want to reset until
         * probe is done.
         */
        if (gve_get_probe_in_progress(priv))
                return;

        if (gve_get_do_reset(priv)) {
                rtnl_lock();
                gve_reset(priv, false);
                rtnl_unlock();
        }
}

void gve_handle_report_stats(struct gve_priv *priv)
{
        int idx, stats_idx = 0, tx_bytes;
        unsigned int start = 0;
        struct stats *stats = priv->stats_report->stats;

        if (!gve_get_report_stats(priv))
                return;

        be64_add_cpu(&priv->stats_report->written_count, 1);
        /* tx stats */
        if (priv->tx) {
                for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
                        do {
                                start = u64_stats_fetch_begin(&priv->tx[idx].statss);
                                tx_bytes = priv->tx[idx].bytes_done;
                        } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(TX_WAKE_CNT),
                                .value = cpu_to_be64(priv->tx[idx].wake_queue),
                                .queue_id = cpu_to_be32(idx),
                        };
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(TX_STOP_CNT),
                                .value = cpu_to_be64(priv->tx[idx].stop_queue),
                                .queue_id = cpu_to_be32(idx),
                        };
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(TX_FRAMES_SENT),
                                .value = cpu_to_be64(priv->tx[idx].req),
                                .queue_id = cpu_to_be32(idx),
                        };
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(TX_BYTES_SENT),
                                .value = cpu_to_be64(tx_bytes),
                                .queue_id = cpu_to_be32(idx),
                        };
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
                                .value = cpu_to_be64(priv->tx[idx].done),
                                .queue_id = cpu_to_be32(idx),
                        };
                }
        }
        /* rx stats */
        if (priv->rx) {
                for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
                                .value = cpu_to_be64(priv->rx[idx].desc.seqno),
                                .queue_id = cpu_to_be32(idx),
                        };
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
                                .value = cpu_to_be64(priv->rx[0].fill_cnt),
                                .queue_id = cpu_to_be32(idx),
                        };
                }
        }
}

static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
{
        if (!gve_get_napi_enabled(priv))
                return;

        if (link_status == netif_carrier_ok(priv->dev))
                return;

        if (link_status) {
                netdev_info(priv->dev, "Device link is up.\n");
                netif_carrier_on(priv->dev);
        } else {
                netdev_info(priv->dev, "Device link is down.\n");
                netif_carrier_off(priv->dev);
        }
}

/* Handle NIC status register changes, reset requests and report stats */
static void gve_service_task(struct work_struct *work)
{
        struct gve_priv *priv = container_of(work, struct gve_priv,
                                             service_task);
        u32 status = ioread32be(&priv->reg_bar0->device_status);

        gve_handle_status(priv, status);

        gve_handle_reset(priv);
        gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
}

static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
        int num_ntfy;
        int err;

        /* Set up the adminq */
        err = gve_adminq_alloc(&priv->pdev->dev, priv);
        if (err) {
                dev_err(&priv->pdev->dev,
                        "Failed to alloc admin queue: err=%d\n", err);
                return err;
        }

        if (skip_describe_device)
                goto setup_device;

        priv->raw_addressing = false;
        /* Get the initial information we need from the device */
        err = gve_adminq_describe_device(priv);
        if (err) {
                dev_err(&priv->pdev->dev,
                        "Could not get device information: err=%d\n", err);
                goto err;
        }
        if (priv->dev->max_mtu > PAGE_SIZE) {
                priv->dev->max_mtu = PAGE_SIZE;
                err = gve_adminq_set_mtu(priv, priv->dev->mtu);
                if (err) {
                        dev_err(&priv->pdev->dev, "Could not set mtu");
                        goto err;
                }
        }
        priv->dev->mtu = priv->dev->max_mtu;
        num_ntfy = pci_msix_vec_count(priv->pdev);
        if (num_ntfy <= 0) {
                dev_err(&priv->pdev->dev,
                        "could not count MSI-x vectors: err=%d\n", num_ntfy);
                err = num_ntfy;
                goto err;
        } else if (num_ntfy < GVE_MIN_MSIX) {
                dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
                        GVE_MIN_MSIX, num_ntfy);
                err = -EINVAL;
                goto err;
        }

        priv->num_registered_pages = 0;
        priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
        /* gvnic has one Notification Block per MSI-x vector, except for the
         * management vector
         */
        priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
        priv->mgmt_msix_idx = priv->num_ntfy_blks;

        priv->tx_cfg.max_queues =
                min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
        priv->rx_cfg.max_queues =
                min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);

        priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
        priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
        if (priv->default_num_queues > 0) {
                priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
                                                priv->tx_cfg.num_queues);
                priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
                                                priv->rx_cfg.num_queues);
        }

        dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
                 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
        dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
                 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);

setup_device:
        err = gve_setup_device_resources(priv);
        if (!err)
                return 0;
err:
        gve_adminq_free(&priv->pdev->dev, priv);
        return err;
}

static void gve_teardown_priv_resources(struct gve_priv *priv)
{
        gve_teardown_device_resources(priv);
        gve_adminq_free(&priv->pdev->dev, priv);
}

static void gve_trigger_reset(struct gve_priv *priv)
{
        /* Reset the device by releasing the AQ */
        gve_adminq_release(priv);
}

static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
{
        gve_trigger_reset(priv);
        /* With the reset having already happened, close cannot fail */
        if (was_up)
                gve_close(priv->dev);
        gve_teardown_priv_resources(priv);
}

static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
{
        int err;

        err = gve_init_priv(priv, true);
        if (err)
                goto err;
        if (was_up) {
                err = gve_open(priv->dev);
                if (err)
                        goto err;
        }
        return 0;
err:
        dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
        gve_turndown(priv);
        return err;
}

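/* Perform a full device reset. If attempt_teardown is false the device is in
 * an unknown state, so the data path is turned down and torn down right away;
 * otherwise a normal close is attempted first. Either way the private
 * resources are rebuilt afterwards via gve_reset_recovery().
 */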
int gve_reset(struct gve_priv *priv, bool attempt_teardown)
{
        bool was_up = netif_carrier_ok(priv->dev);
        int err;

        dev_info(&priv->pdev->dev, "Performing reset\n");
        gve_clear_do_reset(priv);
        gve_set_reset_in_progress(priv);
        /* If we aren't attempting to teardown normally, just go turndown and
         * reset right away.
         */
        if (!attempt_teardown) {
                gve_turndown(priv);
                gve_reset_and_teardown(priv, was_up);
        } else {
                /* Otherwise attempt to close normally */
                if (was_up) {
                        err = gve_close(priv->dev);
                        /* If that fails reset as we did above */
                        if (err)
                                gve_reset_and_teardown(priv, was_up);
                }
                /* Clean up any remaining resources */
                gve_teardown_priv_resources(priv);
        }

        /* Set it all back up */
        err = gve_reset_recovery(priv, was_up);
        gve_clear_reset_in_progress(priv);
        priv->reset_cnt++;
        priv->interface_up_cnt = 0;
        priv->interface_down_cnt = 0;
        priv->stats_report_trigger_cnt = 0;
        return err;
}

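/* The driver identifies itself to the device by writing the version string,
 * one byte at a time, to the driver_version register, terminated by '\n'.
 */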
static void gve_write_version(u8 __iomem *driver_version_register)
{
        const char *c = gve_version_prefix;

        while (*c) {
                writeb(*c, driver_version_register);
                c++;
        }

        c = gve_version_str;
        while (*c) {
                writeb(*c, driver_version_register);
                c++;
        }
        writeb('\n', driver_version_register);
}

static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int max_tx_queues, max_rx_queues;
        struct net_device *dev;
        __be32 __iomem *db_bar;
        struct gve_registers __iomem *reg_bar;
        struct gve_priv *priv;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return -ENXIO;

        err = pci_request_regions(pdev, "gvnic-cfg");
        if (err)
                goto abort_with_enabled;

        pci_set_master(pdev);

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
                goto abort_with_pci_region;
        }

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_err(&pdev->dev,
                        "Failed to set consistent dma mask: err=%d\n", err);
                goto abort_with_pci_region;
        }

        reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
        if (!reg_bar) {
                dev_err(&pdev->dev, "Failed to map pci bar!\n");
                err = -ENOMEM;
                goto abort_with_pci_region;
        }

        db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
        if (!db_bar) {
                dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
                err = -ENOMEM;
                goto abort_with_reg_bar;
        }

        gve_write_version(&reg_bar->driver_version);
        /* Get max queues to alloc etherdev */
        max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
        max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
        /* Alloc and setup the netdev and priv */
        dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
        if (!dev) {
                dev_err(&pdev->dev, "could not allocate netdev\n");
                goto abort_with_db_bar;
        }
        SET_NETDEV_DEV(dev, &pdev->dev);
        pci_set_drvdata(pdev, dev);
        dev->ethtool_ops = &gve_ethtool_ops;
        dev->netdev_ops = &gve_netdev_ops;
        /* advertise features */
        dev->hw_features = NETIF_F_HIGHDMA;
        dev->hw_features |= NETIF_F_SG;
        dev->hw_features |= NETIF_F_HW_CSUM;
        dev->hw_features |= NETIF_F_TSO;
        dev->hw_features |= NETIF_F_TSO6;
        dev->hw_features |= NETIF_F_TSO_ECN;
        dev->hw_features |= NETIF_F_RXCSUM;
        dev->hw_features |= NETIF_F_RXHASH;
        dev->features = dev->hw_features;
        dev->watchdog_timeo = 5 * HZ;
        dev->min_mtu = ETH_MIN_MTU;
        netif_carrier_off(dev);

        priv = netdev_priv(dev);
        priv->dev = dev;
        priv->pdev = pdev;
        priv->msg_enable = DEFAULT_MSG_LEVEL;
        priv->reg_bar0 = reg_bar;
        priv->db_bar2 = db_bar;
        priv->service_task_flags = 0x0;
        priv->state_flags = 0x0;
        priv->ethtool_flags = 0x0;

        gve_set_probe_in_progress(priv);
        priv->gve_wq = alloc_ordered_workqueue("gve", 0);
        if (!priv->gve_wq) {
                dev_err(&pdev->dev, "Could not allocate workqueue");
                err = -ENOMEM;
                goto abort_with_netdev;
        }
        INIT_WORK(&priv->service_task, gve_service_task);
        INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
        priv->tx_cfg.max_queues = max_tx_queues;
        priv->rx_cfg.max_queues = max_rx_queues;

        err = gve_init_priv(priv, false);
        if (err)
                goto abort_with_wq;

        err = register_netdev(dev);
        if (err)
                goto abort_with_wq;

        dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
        gve_clear_probe_in_progress(priv);
        queue_work(priv->gve_wq, &priv->service_task);
        return 0;

abort_with_wq:
        destroy_workqueue(priv->gve_wq);

abort_with_netdev:
        free_netdev(dev);

abort_with_db_bar:
        pci_iounmap(pdev, db_bar);

abort_with_reg_bar:
        pci_iounmap(pdev, reg_bar);

abort_with_pci_region:
        pci_release_regions(pdev);

abort_with_enabled:
        pci_disable_device(pdev);
        return -ENXIO;
}

static void gve_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct gve_priv *priv = netdev_priv(netdev);
        __be32 __iomem *db_bar = priv->db_bar2;
        void __iomem *reg_bar = priv->reg_bar0;

        unregister_netdev(netdev);
        gve_teardown_priv_resources(priv);
        destroy_workqueue(priv->gve_wq);
        free_netdev(netdev);
        pci_iounmap(pdev, db_bar);
        pci_iounmap(pdev, reg_bar);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
}

static const struct pci_device_id gve_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
        { }
};

static struct pci_driver gvnic_driver = {
        .name           = "gvnic",
        .id_table       = gve_id_table,
        .probe          = gve_probe,
        .remove         = gve_remove,
};

module_pci_driver(gvnic_driver);

MODULE_DEVICE_TABLE(pci, gve_id_table);
MODULE_AUTHOR("Google, Inc.");
MODULE_DESCRIPTION("gVNIC Driver");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_VERSION(GVE_VERSION);