// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include "gve.h"
#include "gve_dqo.h"
#include "gve_adminq.h"
#include "gve_register.h"
#define GVE_DEFAULT_RX_COPYBREAK	(256)

#define DEFAULT_MSG_LEVEL	(NETIF_MSG_DRV | NETIF_MSG_LINK)
#define GVE_VERSION		"1.0.0"
#define GVE_VERSION_PREFIX	"GVE-"

const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);

	if (gve_is_gqi(priv))
		return gve_tx(skb, dev);
	else
		return gve_tx_dqo(skb, dev);
}
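
/* Sum per-ring packet/byte counters into the netdev stats. Each ring keeps
 * its own u64_stats sync structure, so every read is retried until it
 * observes a snapshot that no writer updated mid-read.
 */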
static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
{
	struct gve_priv *priv = netdev_priv(dev);
	unsigned int start;
	int ring;

	if (priv->rx) {
		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				s->rx_packets += priv->rx[ring].rpackets;
				s->rx_bytes += priv->rx[ring].rbytes;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
		}
	}
	if (priv->tx) {
		for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				s->tx_packets += priv->tx[ring].pkt_done;
				s->tx_bytes += priv->tx[ring].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
		}
	}
}
static int gve_alloc_counter_array(struct gve_priv *priv)
{
	priv->counter_array =
		dma_alloc_coherent(&priv->pdev->dev,
				   priv->num_event_counters *
				   sizeof(*priv->counter_array),
				   &priv->counter_array_bus, GFP_KERNEL);
	if (!priv->counter_array)
		return -ENOMEM;

	return 0;
}

static void gve_free_counter_array(struct gve_priv *priv)
{
	dma_free_coherent(&priv->pdev->dev,
			  priv->num_event_counters *
			  sizeof(*priv->counter_array),
			  priv->counter_array, priv->counter_array_bus);
	priv->counter_array = NULL;
}
/* NIC requests to report stats */
static void gve_stats_report_task(struct work_struct *work)
{
	struct gve_priv *priv = container_of(work, struct gve_priv,
					     stats_report_task);
	if (gve_get_do_report_stats(priv)) {
		gve_handle_report_stats(priv);
		gve_clear_do_report_stats(priv);
	}
}

static void gve_stats_report_schedule(struct gve_priv *priv)
{
	if (!gve_get_probe_in_progress(priv) &&
	    !gve_get_reset_in_progress(priv)) {
		gve_set_do_report_stats(priv);
		queue_work(priv->gve_wq, &priv->stats_report_task);
	}
}

static void gve_stats_report_timer(struct timer_list *t)
{
	struct gve_priv *priv = from_timer(priv, t, stats_report_timer);

	mod_timer(&priv->stats_report_timer,
		  round_jiffies(jiffies +
		  msecs_to_jiffies(priv->stats_report_timer_period)));
	gve_stats_report_schedule(priv);
}
static int gve_alloc_stats_report(struct gve_priv *priv)
{
	int tx_stats_num, rx_stats_num;

	tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
		       priv->tx_cfg.num_queues;
	rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
		       priv->rx_cfg.num_queues;
	priv->stats_report_len = struct_size(priv->stats_report, stats,
					     tx_stats_num + rx_stats_num);
	priv->stats_report =
		dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
				   &priv->stats_report_bus, GFP_KERNEL);
	if (!priv->stats_report)
		return -ENOMEM;
	/* Set up timer for the report-stats task */
	timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
	priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
	return 0;
}

static void gve_free_stats_report(struct gve_priv *priv)
{
	del_timer_sync(&priv->stats_report_timer);
	dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
			  priv->stats_report, priv->stats_report_bus);
	priv->stats_report = NULL;
}
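
/* Interrupt handlers. The management vector only kicks the service task.
 * For GQI queues the handler masks the vector through its doorbell before
 * scheduling NAPI; for DQO the hardware masks the vector automatically, so
 * the handler only needs to schedule NAPI.
 */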
static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
{
	struct gve_priv *priv = arg;

	queue_work(priv->gve_wq, &priv->service_task);
	return IRQ_HANDLED;
}

static irqreturn_t gve_intr(int irq, void *arg)
{
	struct gve_notify_block *block = arg;
	struct gve_priv *priv = block->priv;

	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
	napi_schedule_irqoff(&block->napi);
	return IRQ_HANDLED;
}

static irqreturn_t gve_intr_dqo(int irq, void *arg)
{
	struct gve_notify_block *block = arg;

	/* Interrupts are automatically masked */
	napi_schedule_irqoff(&block->napi);
	return IRQ_HANDLED;
}
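
/* GQI NAPI poll: process the block's tx and rx rings; if either reports
 * pending work, keep polling by returning the full budget. Otherwise
 * complete NAPI, ack and unmask the vector, then poll once more to catch
 * any work that raced with the unmask.
 */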
static int gve_napi_poll(struct napi_struct *napi, int budget)
{
	struct gve_notify_block *block;
	__be32 __iomem *irq_doorbell;
	bool reschedule = false;
	struct gve_priv *priv;

	block = container_of(napi, struct gve_notify_block, napi);
	priv = block->priv;

	if (block->tx)
		reschedule |= gve_tx_poll(block, budget);
	if (block->rx)
		reschedule |= gve_rx_poll(block, budget);

	if (reschedule)
		return budget;

	napi_complete(napi);
	irq_doorbell = gve_irq_doorbell(priv, block);
	iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);

	/* Double check we have no extra work.
	 * Ensure unmask synchronizes with checking for work.
	 */
	mb();

	if (block->tx)
		reschedule |= gve_tx_poll(block, -1);
	if (block->rx)
		reschedule |= gve_rx_poll(block, -1);

	if (reschedule && napi_reschedule(napi))
		iowrite32be(GVE_IRQ_MASK, irq_doorbell);

	return 0;
}
static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
{
	struct gve_notify_block *block =
		container_of(napi, struct gve_notify_block, napi);
	struct gve_priv *priv = block->priv;
	bool reschedule = false;
	int work_done = 0;

	/* Clear PCI MSI-X Pending Bit Array (PBA)
	 *
	 * This bit is set if an interrupt event occurs while the vector is
	 * masked. If this bit is set and we reenable the interrupt, it will
	 * fire again. Since we're just about to poll the queue state, we don't
	 * need it to fire again.
	 *
	 * Under high softirq load, it's possible that the interrupt condition
	 * is triggered twice before we got the chance to process it.
	 */
	gve_write_irq_doorbell_dqo(priv, block,
				   GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);

	if (block->tx)
		reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);

	if (block->rx) {
		work_done = gve_rx_poll_dqo(block, budget);
		reschedule |= work_done == budget;
	}

	if (reschedule)
		return budget;

	if (likely(napi_complete_done(napi, work_done))) {
		/* Enable interrupts again.
		 *
		 * We don't need to repoll afterwards because HW supports the
		 * PCI MSI-X PBA feature.
		 *
		 * Another interrupt would be triggered if a new event came in
		 * since the last one.
		 */
		gve_write_irq_doorbell_dqo(priv, block,
					   GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
	}

	return work_done;
}
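
/* One MSI-X vector is used per notification block, plus one final vector
 * for management interrupts. If fewer vectors are granted than requested,
 * the block count is rounded down to an even number so the remainder still
 * splits cleanly between tx and rx, and the queue maximums are clamped to
 * match.
 */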
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
	int num_vecs_requested = priv->num_ntfy_blks + 1;
	char *name = priv->dev->name;
	unsigned int active_cpus;
	int vecs_enabled;
	int i, j;
	int err;

	priv->msix_vectors = kvzalloc(num_vecs_requested *
				      sizeof(*priv->msix_vectors), GFP_KERNEL);
	if (!priv->msix_vectors)
		return -ENOMEM;
	for (i = 0; i < num_vecs_requested; i++)
		priv->msix_vectors[i].entry = i;
	vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
					     GVE_MIN_MSIX, num_vecs_requested);
	if (vecs_enabled < 0) {
		dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
			GVE_MIN_MSIX, vecs_enabled);
		err = vecs_enabled;
		goto abort_with_msix_vectors;
	}
	if (vecs_enabled != num_vecs_requested) {
		int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
		int vecs_per_type = new_num_ntfy_blks / 2;
		int vecs_left = new_num_ntfy_blks % 2;

		priv->num_ntfy_blks = new_num_ntfy_blks;
		priv->mgmt_msix_idx = priv->num_ntfy_blks;
		priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
						vecs_per_type);
		priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
						vecs_per_type + vecs_left);
		dev_err(&priv->pdev->dev,
			"Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
			vecs_enabled, priv->tx_cfg.max_queues,
			priv->rx_cfg.max_queues);
		if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
			priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
		if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
			priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
	}
	/* Half the notification blocks go to TX and half to RX */
	active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());

	/* Setup Management Vector - the last vector */
	snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
		 name);
	err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
			  gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
	if (err) {
		dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
		goto abort_with_msix_enabled;
	}
	priv->ntfy_blocks =
		dma_alloc_coherent(&priv->pdev->dev,
				   priv->num_ntfy_blks *
				   sizeof(*priv->ntfy_blocks),
				   &priv->ntfy_block_bus, GFP_KERNEL);
	if (!priv->ntfy_blocks) {
		err = -ENOMEM;
		goto abort_with_mgmt_vector;
	}
	/* Setup the other blocks - the first n-1 vectors */
	for (i = 0; i < priv->num_ntfy_blks; i++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[i];
		int msix_idx = i;

		snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
			 name, i);
		block->priv = priv;
		err = request_irq(priv->msix_vectors[msix_idx].vector,
				  gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
				  0, block->name, block);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to receive msix vector %d\n", i);
			goto abort_with_some_ntfy_blocks;
		}
		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      get_cpu_mask(i % active_cpus));
	}
	return 0;
abort_with_some_ntfy_blocks:
	for (j = 0; j < i; j++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[j];
		int msix_idx = j;

		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      NULL);
		free_irq(priv->msix_vectors[msix_idx].vector, block);
	}
	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
			  sizeof(*priv->ntfy_blocks),
			  priv->ntfy_blocks, priv->ntfy_block_bus);
	priv->ntfy_blocks = NULL;
abort_with_mgmt_vector:
	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
abort_with_msix_enabled:
	pci_disable_msix(priv->pdev);
abort_with_msix_vectors:
	kvfree(priv->msix_vectors);
	priv->msix_vectors = NULL;
	return err;
}
static void gve_free_notify_blocks(struct gve_priv *priv)
{
	int i;

	if (priv->msix_vectors) {
		/* Free the irqs */
		for (i = 0; i < priv->num_ntfy_blks; i++) {
			struct gve_notify_block *block = &priv->ntfy_blocks[i];
			int msix_idx = i;

			irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
					      NULL);
			free_irq(priv->msix_vectors[msix_idx].vector, block);
		}
		free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
	}
	dma_free_coherent(&priv->pdev->dev,
			  priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
			  priv->ntfy_blocks, priv->ntfy_block_bus);
	priv->ntfy_blocks = NULL;
	pci_disable_msix(priv->pdev);
	kvfree(priv->msix_vectors);
	priv->msix_vectors = NULL;
}
static int gve_setup_device_resources(struct gve_priv *priv)
{
	int err;

	err = gve_alloc_counter_array(priv);
	if (err)
		return err;
	err = gve_alloc_notify_blocks(priv);
	if (err)
		goto abort_with_counter;
	err = gve_alloc_stats_report(priv);
	if (err)
		goto abort_with_ntfy_blocks;
	err = gve_adminq_configure_device_resources(priv,
						    priv->counter_array_bus,
						    priv->num_event_counters,
						    priv->ntfy_block_bus,
						    priv->num_ntfy_blks);
	if (unlikely(err)) {
		dev_err(&priv->pdev->dev,
			"could not setup device_resources: err=%d\n", err);
		err = -ENXIO;
		goto abort_with_stats_report;
	}

	if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
		priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
					       GFP_KERNEL);
		if (!priv->ptype_lut_dqo) {
			err = -ENOMEM;
			goto abort_with_stats_report;
		}
		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to get ptype map: err=%d\n", err);
			goto abort_with_ptype_lut;
		}
	}

	err = gve_adminq_report_stats(priv, priv->stats_report_len,
				      priv->stats_report_bus,
				      GVE_STATS_REPORT_TIMER_PERIOD);
	if (err)
		dev_err(&priv->pdev->dev,
			"Failed to report stats: err=%d\n", err);
	gve_set_device_resources_ok(priv);
	return 0;

abort_with_ptype_lut:
	kvfree(priv->ptype_lut_dqo);
	priv->ptype_lut_dqo = NULL;
abort_with_stats_report:
	gve_free_stats_report(priv);
abort_with_ntfy_blocks:
	gve_free_notify_blocks(priv);
abort_with_counter:
	gve_free_counter_array(priv);

	return err;
}
static void gve_trigger_reset(struct gve_priv *priv);

static void gve_teardown_device_resources(struct gve_priv *priv)
{
	int err;

	/* Tell device its resources are being freed */
	if (gve_get_device_resources_ok(priv)) {
		/* detach the stats report */
		err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to detach stats report: err=%d\n", err);
			gve_trigger_reset(priv);
		}
		err = gve_adminq_deconfigure_device_resources(priv);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Could not deconfigure device resources: err=%d\n",
				err);
			gve_trigger_reset(priv);
		}
	}

	kvfree(priv->ptype_lut_dqo);
	priv->ptype_lut_dqo = NULL;

	gve_free_counter_array(priv);
	gve_free_notify_blocks(priv);
	gve_free_stats_report(priv);
	gve_clear_device_resources_ok(priv);
}
static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
			 int (*gve_poll)(struct napi_struct *, int))
{
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

	netif_napi_add(priv->dev, &block->napi, gve_poll,
		       NAPI_POLL_WEIGHT);
}

static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
{
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

	netif_napi_del(&block->napi);
}
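
/* Queue page lists (QPLs) back the rings in the QPL queue formats: every
 * DMA page is registered with the device through the admin queue before
 * the rings are created, and unregistered again on teardown. The raw
 * addressing (RDA) formats have no QPLs, so these loops run zero times.
 */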
static int gve_register_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int err;
	int i;

	for (i = 0; i < num_qpls; i++) {
		err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "failed to register queue page list %d\n",
				  priv->qpls[i].id);
			/* This failure will trigger a reset - no need to clean
			 * up
			 */
			return err;
		}
	}
	return 0;
}

static int gve_unregister_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int err;
	int i;

	for (i = 0; i < num_qpls; i++) {
		err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
		/* This failure will trigger a reset - no need to clean up */
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to unregister queue page list %d\n",
				  priv->qpls[i].id);
			return err;
		}
	}
	return 0;
}
static int gve_create_rings(struct gve_priv *priv)
{
	int err;
	int i;

	err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
	if (err) {
		netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
			  priv->tx_cfg.num_queues);
		/* This failure will trigger a reset - no need to clean
		 * up
		 */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
		  priv->tx_cfg.num_queues);

	err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
	if (err) {
		netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
			  priv->rx_cfg.num_queues);
		/* This failure will trigger a reset - no need to clean
		 * up
		 */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
		  priv->rx_cfg.num_queues);

	if (gve_is_gqi(priv)) {
		/* Rx data ring has been prefilled with packet buffers at queue
		 * allocation time.
		 *
		 * Write the doorbell to provide descriptor slots and packet
		 * buffers to the NIC.
		 */
		for (i = 0; i < priv->rx_cfg.num_queues; i++)
			gve_rx_write_doorbell(priv, &priv->rx[i]);
	} else {
		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
			/* Post buffers and ring doorbell. */
			gve_rx_post_buffers_dqo(&priv->rx[i]);
		}
	}

	return 0;
}
static void add_napi_init_sync_stats(struct gve_priv *priv,
				     int (*napi_poll)(struct napi_struct *napi,
						      int budget))
{
	int i;

	/* Add tx napi & init sync stats*/
	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);

		u64_stats_init(&priv->tx[i].statss);
		priv->tx[i].ntfy_id = ntfy_idx;
		gve_add_napi(priv, ntfy_idx, napi_poll);
	}
	/* Add rx napi & init sync stats*/
	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		int ntfy_idx = gve_rx_idx_to_ntfy(priv, i);

		u64_stats_init(&priv->rx[i].statss);
		priv->rx[i].ntfy_id = ntfy_idx;
		gve_add_napi(priv, ntfy_idx, napi_poll);
	}
}

static void gve_tx_free_rings(struct gve_priv *priv)
{
	if (gve_is_gqi(priv)) {
		gve_tx_free_rings_gqi(priv);
	} else {
		gve_tx_free_rings_dqo(priv);
	}
}
static int gve_alloc_rings(struct gve_priv *priv)
{
	int err;

	/* Setup tx rings */
	priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
			    GFP_KERNEL);
	if (!priv->tx)
		return -ENOMEM;

	if (gve_is_gqi(priv))
		err = gve_tx_alloc_rings(priv);
	else
		err = gve_tx_alloc_rings_dqo(priv);
	if (err)
		goto free_tx;

	/* Setup rx rings */
	priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
			    GFP_KERNEL);
	if (!priv->rx) {
		err = -ENOMEM;
		goto free_tx_queue;
	}

	if (gve_is_gqi(priv))
		err = gve_rx_alloc_rings(priv);
	else
		err = gve_rx_alloc_rings_dqo(priv);
	if (err)
		goto free_rx;

	if (gve_is_gqi(priv))
		add_napi_init_sync_stats(priv, gve_napi_poll);
	else
		add_napi_init_sync_stats(priv, gve_napi_poll_dqo);

	return 0;

free_rx:
	kvfree(priv->rx);
	priv->rx = NULL;
free_tx_queue:
	gve_tx_free_rings(priv);
free_tx:
	kvfree(priv->tx);
	priv->tx = NULL;
	return err;
}
static int gve_destroy_rings(struct gve_priv *priv)
{
	int err;

	err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
	if (err) {
		netif_err(priv, drv, priv->dev,
			  "failed to destroy tx queues\n");
		/* This failure will trigger a reset - no need to clean up */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
	err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
	if (err) {
		netif_err(priv, drv, priv->dev,
			  "failed to destroy rx queues\n");
		/* This failure will trigger a reset - no need to clean up */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
	return 0;
}

static void gve_rx_free_rings(struct gve_priv *priv)
{
	if (gve_is_gqi(priv))
		gve_rx_free_rings_gqi(priv);
	else
		gve_rx_free_rings_dqo(priv);
}
static void gve_free_rings(struct gve_priv *priv)
{
	int ntfy_idx;
	int i;

	if (priv->tx) {
		for (i = 0; i < priv->tx_cfg.num_queues; i++) {
			ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
			gve_remove_napi(priv, ntfy_idx);
		}
		gve_tx_free_rings(priv);
		kvfree(priv->tx);
		priv->tx = NULL;
	}
	if (priv->rx) {
		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
			ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
			gve_remove_napi(priv, ntfy_idx);
		}
		gve_rx_free_rings(priv);
		kvfree(priv->rx);
		priv->rx = NULL;
	}
}
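
/* Allocate one page and DMA-map it for device use. Page-allocation and
 * DMA-mapping failures are counted separately in the priv stats so the
 * two failure modes can be told apart.
 */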
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction dir)
{
	*page = alloc_page(GFP_KERNEL);
	if (!*page) {
		priv->page_alloc_fail++;
		return -ENOMEM;
	}
	*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
	if (dma_mapping_error(dev, *dma)) {
		priv->dma_mapping_error++;
		put_page(*page);
		return -ENOMEM;
	}
	return 0;
}
static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
				     int pages)
{
	struct gve_queue_page_list *qpl = &priv->qpls[id];
	int err;
	int i;

	if (pages + priv->num_registered_pages > priv->max_registered_pages) {
		netif_err(priv, drv, priv->dev,
			  "Reached max number of registered pages %llu > %llu\n",
			  pages + priv->num_registered_pages,
			  priv->max_registered_pages);
		return -EINVAL;
	}

	qpl->id = id;
	qpl->num_entries = 0;
	qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
	/* caller handles clean up */
	if (!qpl->pages)
		return -ENOMEM;
	qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),
				   GFP_KERNEL);
	/* caller handles clean up */
	if (!qpl->page_buses)
		return -ENOMEM;

	for (i = 0; i < pages; i++) {
		err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
				     &qpl->page_buses[i],
				     gve_qpl_dma_dir(priv, id));
		/* caller handles clean up */
		if (err)
			return -ENOMEM;
		qpl->num_entries++;
	}
	priv->num_registered_pages += pages;

	return 0;
}
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction dir)
{
	if (!dma_mapping_error(dev, dma))
		dma_unmap_page(dev, dma, PAGE_SIZE, dir);
	if (page)
		put_page(page);
}

static void gve_free_queue_page_list(struct gve_priv *priv,
				     u32 id)
{
	struct gve_queue_page_list *qpl = &priv->qpls[id];
	int i;

	if (!qpl->pages)
		return;
	if (!qpl->page_buses)
		goto free_pages;

	for (i = 0; i < qpl->num_entries; i++)
		gve_free_page(&priv->pdev->dev, qpl->pages[i],
			      qpl->page_buses[i], gve_qpl_dma_dir(priv, id));

	kvfree(qpl->page_buses);
free_pages:
	kvfree(qpl->pages);
	priv->num_registered_pages -= qpl->num_entries;
}
static int gve_alloc_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int i, j;
	int err;

	/* Raw addressing means no QPLs */
	if (priv->queue_format == GVE_GQI_RDA_FORMAT)
		return 0;

	priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
	if (!priv->qpls)
		return -ENOMEM;

	for (i = 0; i < gve_num_tx_qpls(priv); i++) {
		err = gve_alloc_queue_page_list(priv, i,
						priv->tx_pages_per_qpl);
		if (err)
			goto free_qpls;
	}
	for (; i < num_qpls; i++) {
		err = gve_alloc_queue_page_list(priv, i,
						priv->rx_data_slot_cnt);
		if (err)
			goto free_qpls;
	}

	priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
				     sizeof(unsigned long) * BITS_PER_BYTE;
	priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) *
					    sizeof(unsigned long), GFP_KERNEL);
	if (!priv->qpl_cfg.qpl_id_map) {
		err = -ENOMEM;
		goto free_qpls;
	}

	return 0;

free_qpls:
	for (j = 0; j <= i; j++)
		gve_free_queue_page_list(priv, j);
	kvfree(priv->qpls);
	return err;
}

static void gve_free_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int i;

	/* Raw addressing means no QPLs */
	if (priv->queue_format == GVE_GQI_RDA_FORMAT)
		return;

	kvfree(priv->qpl_cfg.qpl_id_map);

	for (i = 0; i < num_qpls; i++)
		gve_free_queue_page_list(priv, i);

	kvfree(priv->qpls);
}
/* Use this to schedule a reset when the device is capable of continuing
 * to handle other requests in its current state. If it is not, do a reset
 * in thread instead.
 */
void gve_schedule_reset(struct gve_priv *priv)
{
	gve_set_do_reset(priv);
	queue_work(priv->gve_wq, &priv->service_task);
}
static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
static void gve_turndown(struct gve_priv *priv);
static void gve_turnup(struct gve_priv *priv);
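
/* Bring the device up: allocate QPLs and rings, register the QPLs, create
 * the rings on the device, then turn everything up. Failures after the
 * device state has been touched cannot simply be unwound, so they fall
 * through to a reset.
 */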
static int gve_open(struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	int err;

	err = gve_alloc_qpls(priv);
	if (err)
		return err;

	err = gve_alloc_rings(priv);
	if (err)
		goto free_qpls;

	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
	if (err)
		goto free_rings;
	err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
	if (err)
		goto free_rings;

	err = gve_register_qpls(priv);
	if (err)
		goto reset;

	if (!gve_is_gqi(priv)) {
		/* Hard code this for now. This may be tuned in the future for
		 * performance.
		 */
		priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
	}
	err = gve_create_rings(priv);
	if (err)
		goto reset;

	gve_set_device_rings_ok(priv);

	if (gve_get_report_stats(priv))
		mod_timer(&priv->stats_report_timer,
			  round_jiffies(jiffies +
				msecs_to_jiffies(priv->stats_report_timer_period)));

	gve_turnup(priv);
	queue_work(priv->gve_wq, &priv->service_task);
	priv->interface_up_cnt++;
	return 0;

free_rings:
	gve_free_rings(priv);
free_qpls:
	gve_free_qpls(priv);
	return err;

reset:
	/* This must have been called from a reset due to the rtnl lock
	 * so just return at this point.
	 */
	if (gve_get_reset_in_progress(priv))
		return err;
	/* Otherwise reset before returning */
	gve_reset_and_teardown(priv, true);
	/* if this fails there is nothing we can do so just ignore the return */
	gve_reset_recovery(priv, false);
	/* return the original error */
	return err;
}
static int gve_close(struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);
	if (gve_get_device_rings_ok(priv)) {
		gve_turndown(priv);
		err = gve_destroy_rings(priv);
		if (err)
			goto err;
		err = gve_unregister_qpls(priv);
		if (err)
			goto err;
		gve_clear_device_rings_ok(priv);
	}
	del_timer_sync(&priv->stats_report_timer);

	gve_free_rings(priv);
	gve_free_qpls(priv);
	priv->interface_down_cnt++;
	return 0;

err:
	/* This must have been called from a reset due to the rtnl lock
	 * so just return at this point.
	 */
	if (gve_get_reset_in_progress(priv))
		return err;
	/* Otherwise reset before returning */
	gve_reset_and_teardown(priv, true);
	return gve_reset_recovery(priv, false);
}
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config)
{
	int err;

	if (netif_carrier_ok(priv->dev)) {
		/* To make this process as simple as possible we teardown the
		 * device, set the new configuration, and then bring the device
		 * up again.
		 */
		err = gve_close(priv->dev);
		/* we have already tried to reset in close,
		 * just fail at this point
		 */
		if (err)
			return err;
		priv->tx_cfg = new_tx_config;
		priv->rx_cfg = new_rx_config;

		err = gve_open(priv->dev);
		if (err)
			goto err;

		return 0;
	}
	/* Set the config for the next up. */
	priv->tx_cfg = new_tx_config;
	priv->rx_cfg = new_rx_config;

	return 0;
err:
	netif_err(priv, drv, priv->dev,
		  "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
	gve_turndown(priv);
	return err;
}
static void gve_turndown(struct gve_priv *priv)
{
	int idx;

	if (netif_carrier_ok(priv->dev))
		netif_carrier_off(priv->dev);

	if (!gve_get_napi_enabled(priv))
		return;

	/* Disable napi to prevent more work from coming in */
	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_disable(&block->napi);
	}
	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_disable(&block->napi);
	}

	/* Stop tx queues */
	netif_tx_disable(priv->dev);

	gve_clear_napi_enabled(priv);
	gve_clear_report_stats(priv);
}
static void gve_turnup(struct gve_priv *priv)
{
	int idx;

	/* Start the tx queues */
	netif_tx_start_all_queues(priv->dev);

	/* Enable napi and unmask interrupts for all queues */
	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_enable(&block->napi);
		if (gve_is_gqi(priv)) {
			iowrite32be(0, gve_irq_doorbell(priv, block));
		} else {
			u32 val = gve_set_itr_ratelimit_dqo(GVE_TX_IRQ_RATELIMIT_US_DQO);

			gve_write_irq_doorbell_dqo(priv, block, val);
		}
	}
	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_enable(&block->napi);
		if (gve_is_gqi(priv)) {
			iowrite32be(0, gve_irq_doorbell(priv, block));
		} else {
			u32 val = gve_set_itr_ratelimit_dqo(GVE_RX_IRQ_RATELIMIT_US_DQO);

			gve_write_irq_doorbell_dqo(priv, block, val);
		}
	}

	gve_set_napi_enabled(priv);
}
static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct gve_priv *priv = netdev_priv(dev);

	gve_schedule_reset(priv);
	priv->tx_timeo_cnt++;
}

static int gve_set_features(struct net_device *netdev,
			    netdev_features_t features)
{
	const netdev_features_t orig_features = netdev->features;
	struct gve_priv *priv = netdev_priv(netdev);
	int err;

	if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
		netdev->features ^= NETIF_F_LRO;
		if (netif_carrier_ok(netdev)) {
			/* To make this process as simple as possible we
			 * teardown the device, set the new configuration,
			 * and then bring the device up again.
			 */
			err = gve_close(netdev);
			/* We have already tried to reset in close, just fail
			 * at this point.
			 */
			if (err)
				goto err;

			err = gve_open(netdev);
			if (err)
				goto err;
		}
	}

	return 0;
err:
	/* Reverts the change on error. */
	netdev->features = orig_features;
	netif_err(priv, drv, netdev,
		  "Set features failed! !!! DISABLING ALL QUEUES !!!\n");
	gve_turndown(priv);
	return err;
}
static const struct net_device_ops gve_netdev_ops = {
	.ndo_start_xmit		=	gve_start_xmit,
	.ndo_open		=	gve_open,
	.ndo_stop		=	gve_close,
	.ndo_get_stats64	=	gve_get_stats,
	.ndo_tx_timeout		=	gve_tx_timeout,
	.ndo_set_features	=	gve_set_features,
};
static void gve_handle_status(struct gve_priv *priv, u32 status)
{
	if (GVE_DEVICE_STATUS_RESET_MASK & status) {
		dev_info(&priv->pdev->dev, "Device requested reset.\n");
		gve_set_do_reset(priv);
	}
	if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
		priv->stats_report_trigger_cnt++;
		gve_set_do_report_stats(priv);
	}
}

static void gve_handle_reset(struct gve_priv *priv)
{
	/* A service task will be scheduled at the end of probe to catch any
	 * resets that need to happen, and we don't want to reset until
	 * probe is done.
	 */
	if (gve_get_probe_in_progress(priv))
		return;

	if (gve_get_do_reset(priv)) {
		rtnl_lock();
		gve_reset(priv, false);
		rtnl_unlock();
	}
}
void gve_handle_report_stats(struct gve_priv *priv)
{
	int idx, stats_idx = 0;
	unsigned int start = 0;
	struct stats *stats = priv->stats_report->stats;
	u64 tx_bytes;

	if (!gve_get_report_stats(priv))
		return;

	be64_add_cpu(&priv->stats_report->written_count, 1);
	/* tx stats */
	if (priv->tx) {
		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
			u32 last_completion = 0;
			u32 tx_frames = 0;

			/* DQO doesn't currently support these metrics. */
			if (gve_is_gqi(priv)) {
				last_completion = priv->tx[idx].done;
				tx_frames = priv->tx[idx].req;
			}

			do {
				start = u64_stats_fetch_begin(&priv->tx[idx].statss);
				tx_bytes = priv->tx[idx].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
			stats[stats_idx++] = (struct stats) {
				.stat_name = cpu_to_be32(TX_WAKE_CNT),
				.value = cpu_to_be64(priv->tx[idx].wake_queue),
				.queue_id = cpu_to_be32(idx),
			};
			stats[stats_idx++] = (struct stats) {
				.stat_name = cpu_to_be32(TX_STOP_CNT),
				.value = cpu_to_be64(priv->tx[idx].stop_queue),
				.queue_id = cpu_to_be32(idx),
			};
			stats[stats_idx++] = (struct stats) {
				.stat_name = cpu_to_be32(TX_FRAMES_SENT),
				.value = cpu_to_be64(tx_frames),
				.queue_id = cpu_to_be32(idx),
			};
			stats[stats_idx++] = (struct stats) {
				.stat_name = cpu_to_be32(TX_BYTES_SENT),
				.value = cpu_to_be64(tx_bytes),
				.queue_id = cpu_to_be32(idx),
			};
			stats[stats_idx++] = (struct stats) {
				.stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
				.value = cpu_to_be64(last_completion),
				.queue_id = cpu_to_be32(idx),
			};
		}
	}
	/* rx stats */
	if (priv->rx) {
		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
			stats[stats_idx++] = (struct stats) {
				.stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
				.value = cpu_to_be64(priv->rx[idx].desc.seqno),
				.queue_id = cpu_to_be32(idx),
			};
			stats[stats_idx++] = (struct stats) {
				.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
				.value = cpu_to_be64(priv->rx[idx].fill_cnt),
				.queue_id = cpu_to_be32(idx),
			};
		}
	}
}
static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
{
	if (!gve_get_napi_enabled(priv))
		return;

	if (link_status == netif_carrier_ok(priv->dev))
		return;

	if (link_status) {
		netdev_info(priv->dev, "Device link is up.\n");
		netif_carrier_on(priv->dev);
	} else {
		netdev_info(priv->dev, "Device link is down.\n");
		netif_carrier_off(priv->dev);
	}
}

/* Handle NIC status register changes, reset requests and report stats */
static void gve_service_task(struct work_struct *work)
{
	struct gve_priv *priv = container_of(work, struct gve_priv,
					     service_task);
	u32 status = ioread32be(&priv->reg_bar0->device_status);

	gve_handle_status(priv, status);

	gve_handle_reset(priv);
	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
}
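
/* One-time device initialization: allocate the admin queue, query the
 * device description, clamp the MTU to one page for GQI (rx buffers are
 * at most page sized), and size the queue configs from the number of
 * MSI-X vectors the device exposes.
 */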
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
	int num_ntfy;
	int err;

	/* Set up the adminq */
	err = gve_adminq_alloc(&priv->pdev->dev, priv);
	if (err) {
		dev_err(&priv->pdev->dev,
			"Failed to alloc admin queue: err=%d\n", err);
		return err;
	}

	if (skip_describe_device)
		goto setup_device;

	priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
	/* Get the initial information we need from the device */
	err = gve_adminq_describe_device(priv);
	if (err) {
		dev_err(&priv->pdev->dev,
			"Could not get device information: err=%d\n", err);
		goto err;
	}
	if (gve_is_gqi(priv) && priv->dev->max_mtu > PAGE_SIZE) {
		priv->dev->max_mtu = PAGE_SIZE;
		err = gve_adminq_set_mtu(priv, priv->dev->mtu);
		if (err) {
			dev_err(&priv->pdev->dev, "Could not set mtu");
			goto err;
		}
	}
	priv->dev->mtu = priv->dev->max_mtu;
	num_ntfy = pci_msix_vec_count(priv->pdev);
	if (num_ntfy <= 0) {
		dev_err(&priv->pdev->dev,
			"could not count MSI-x vectors: err=%d\n", num_ntfy);
		err = num_ntfy;
		goto err;
	} else if (num_ntfy < GVE_MIN_MSIX) {
		dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
			GVE_MIN_MSIX, num_ntfy);
		err = -EINVAL;
		goto err;
	}

	priv->num_registered_pages = 0;
	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
	/* gvnic has one Notification Block per MSI-x vector, except for the
	 * management vector
	 */
	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
	priv->mgmt_msix_idx = priv->num_ntfy_blks;

	priv->tx_cfg.max_queues =
		min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
	priv->rx_cfg.max_queues =
		min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);

	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
	if (priv->default_num_queues > 0) {
		priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
						priv->tx_cfg.num_queues);
		priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
						priv->rx_cfg.num_queues);
	}

	dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
		 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
	dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
		 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);

setup_device:
	err = gve_setup_device_resources(priv);
	if (!err)
		return 0;
err:
	gve_adminq_free(&priv->pdev->dev, priv);
	return err;
}
static void gve_teardown_priv_resources(struct gve_priv *priv)
{
	gve_teardown_device_resources(priv);
	gve_adminq_free(&priv->pdev->dev, priv);
}

static void gve_trigger_reset(struct gve_priv *priv)
{
	/* Reset the device by releasing the AQ */
	gve_adminq_release(priv);
}

static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
{
	gve_trigger_reset(priv);
	/* With the reset having already happened, close cannot fail */
	if (was_up)
		gve_close(priv->dev);
	gve_teardown_priv_resources(priv);
}

static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
{
	int err;

	err = gve_init_priv(priv, true);
	if (err)
		goto err;
	if (was_up) {
		err = gve_open(priv->dev);
		if (err)
			goto err;
	}
	return 0;
err:
	dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
	gve_turndown(priv);
	return err;
}
int gve_reset(struct gve_priv *priv, bool attempt_teardown)
{
	bool was_up = netif_carrier_ok(priv->dev);
	int err;

	dev_info(&priv->pdev->dev, "Performing reset\n");
	gve_clear_do_reset(priv);
	gve_set_reset_in_progress(priv);
	/* If we aren't attempting to teardown normally, just go turndown and
	 * reset right away.
	 */
	if (!attempt_teardown) {
		gve_turndown(priv);
		gve_reset_and_teardown(priv, was_up);
	} else {
		/* Otherwise attempt to close normally */
		if (was_up) {
			err = gve_close(priv->dev);
			/* If that fails reset as we did above */
			if (err)
				gve_reset_and_teardown(priv, was_up);
		}
		/* Clean up any remaining resources */
		gve_teardown_priv_resources(priv);
	}

	/* Set it all back up */
	err = gve_reset_recovery(priv, was_up);
	gve_clear_reset_in_progress(priv);
	priv->reset_cnt++;
	priv->interface_up_cnt = 0;
	priv->interface_down_cnt = 0;
	priv->stats_report_trigger_cnt = 0;
	return err;
}
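
/* The driver identifies itself to the device by streaming "GVE-" followed
 * by the version string, one byte at a time, into the driver_version
 * register, terminated by a newline.
 */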
static void gve_write_version(u8 __iomem *driver_version_register)
{
	const char *c = gve_version_prefix;

	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}

	c = gve_version_str;
	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}
	writeb('\n', driver_version_register);
}
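
/* Probe maps the device's two BARs (BAR0 holds the register page, BAR2
 * the queue doorbells), reads the queue maximums so the netdev can be
 * sized, then allocates and initializes the netdev and priv state before
 * registering with the stack.
 */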
static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int max_tx_queues, max_rx_queues;
	struct net_device *dev;
	__be32 __iomem *db_bar;
	struct gve_registers __iomem *reg_bar;
	struct gve_priv *priv;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "gvnic-cfg");
	if (err)
		goto abort_with_enabled;

	pci_set_master(pdev);

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
		goto abort_with_pci_region;
	}

	reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
	if (!reg_bar) {
		dev_err(&pdev->dev, "Failed to map pci bar!\n");
		err = -ENOMEM;
		goto abort_with_pci_region;
	}

	db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
	if (!db_bar) {
		dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
		err = -ENOMEM;
		goto abort_with_reg_bar;
	}

	gve_write_version(&reg_bar->driver_version);
	/* Get max queues to alloc etherdev */
	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
	/* Alloc and setup the netdev and priv */
	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
	if (!dev) {
		dev_err(&pdev->dev, "could not allocate netdev\n");
		err = -ENOMEM;
		goto abort_with_db_bar;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);
	dev->ethtool_ops = &gve_ethtool_ops;
	dev->netdev_ops = &gve_netdev_ops;

	/* Set default and supported features.
	 *
	 * Features might be set in other locations as well (such as
	 * `gve_adminq_describe_device`).
	 */
	dev->hw_features = NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_SG;
	dev->hw_features |= NETIF_F_HW_CSUM;
	dev->hw_features |= NETIF_F_TSO;
	dev->hw_features |= NETIF_F_TSO6;
	dev->hw_features |= NETIF_F_TSO_ECN;
	dev->hw_features |= NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_RXHASH;
	dev->features = dev->hw_features;
	dev->watchdog_timeo = 5 * HZ;
	dev->min_mtu = ETH_MIN_MTU;
	netif_carrier_off(dev);

	priv = netdev_priv(dev);
	priv->dev = dev;
	priv->pdev = pdev;
	priv->msg_enable = DEFAULT_MSG_LEVEL;
	priv->reg_bar0 = reg_bar;
	priv->db_bar2 = db_bar;
	priv->service_task_flags = 0x0;
	priv->state_flags = 0x0;
	priv->ethtool_flags = 0x0;

	gve_set_probe_in_progress(priv);
	priv->gve_wq = alloc_ordered_workqueue("gve", 0);
	if (!priv->gve_wq) {
		dev_err(&pdev->dev, "Could not allocate workqueue");
		err = -ENOMEM;
		goto abort_with_netdev;
	}
	INIT_WORK(&priv->service_task, gve_service_task);
	INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
	priv->tx_cfg.max_queues = max_tx_queues;
	priv->rx_cfg.max_queues = max_rx_queues;

	err = gve_init_priv(priv, false);
	if (err)
		goto abort_with_wq;

	err = register_netdev(dev);
	if (err)
		goto abort_with_gve_init;

	dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
	dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
	gve_clear_probe_in_progress(priv);
	queue_work(priv->gve_wq, &priv->service_task);
	return 0;

abort_with_gve_init:
	gve_teardown_priv_resources(priv);

abort_with_wq:
	destroy_workqueue(priv->gve_wq);

abort_with_netdev:
	free_netdev(dev);

abort_with_db_bar:
	pci_iounmap(pdev, db_bar);

abort_with_reg_bar:
	pci_iounmap(pdev, reg_bar);

abort_with_pci_region:
	pci_release_regions(pdev);

abort_with_enabled:
	pci_disable_device(pdev);
	return err;
}
static void gve_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct gve_priv *priv = netdev_priv(netdev);
	__be32 __iomem *db_bar = priv->db_bar2;
	void __iomem *reg_bar = priv->reg_bar0;

	unregister_netdev(netdev);
	gve_teardown_priv_resources(priv);
	destroy_workqueue(priv->gve_wq);
	free_netdev(netdev);
	pci_iounmap(pdev, db_bar);
	pci_iounmap(pdev, reg_bar);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static const struct pci_device_id gve_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
	{ }
};

static struct pci_driver gvnic_driver = {
	.name		= "gvnic",
	.id_table	= gve_id_table,
	.probe		= gve_probe,
	.remove		= gve_remove,
};

module_pci_driver(gvnic_driver);

MODULE_DEVICE_TABLE(pci, gve_id_table);
MODULE_AUTHOR("Google, Inc.");
MODULE_DESCRIPTION("gVNIC Driver");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_VERSION(GVE_VERSION);