// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"
#include <linux/etherdevice.h>

static void gve_rx_free_buffer(struct device *dev,
			       struct gve_rx_slot_page_info *page_info,
			       union gve_rx_data_slot *data_slot)
{
	dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) &
				      GVE_DATA_SLOT_ADDR_PAGE_MASK);

	page_ref_sub(page_info->page, page_info->pagecnt_bias - 1);
	gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
}

static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	u32 slots = rx->mask + 1;
	int i;

	if (rx->data.raw_addressing) {
		for (i = 0; i < slots; i++)
			gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
					   &rx->data.data_ring[i]);
	} else {
		for (i = 0; i < slots; i++)
			page_ref_sub(rx->data.page_info[i].page,
				     rx->data.page_info[i].pagecnt_bias - 1);
		gve_unassign_qpl(priv, rx->data.qpl->id);
		rx->data.qpl = NULL;
	}
	kvfree(rx->data.page_info);
	rx->data.page_info = NULL;
}

static void gve_rx_free_ring(struct gve_priv *priv, int idx)
{
	struct gve_rx_ring *rx = &priv->rx[idx];
	struct device *dev = &priv->pdev->dev;
	u32 slots = rx->mask + 1;
	size_t bytes;

	gve_rx_remove_from_block(priv, idx);

	bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
	dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
	rx->desc.desc_ring = NULL;

	dma_free_coherent(dev, sizeof(*rx->q_resources),
			  rx->q_resources, rx->q_resources_bus);
	rx->q_resources = NULL;

	gve_rx_unfill_pages(priv, rx);

	bytes = sizeof(*rx->data.data_ring) * slots;
	dma_free_coherent(dev, bytes, rx->data.data_ring, rx->data.data_bus);
	rx->data.data_ring = NULL;
	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}

static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
				dma_addr_t addr, struct page *page, __be64 *slot_addr)
{
	page_info->page = page;
	page_info->page_offset = 0;
	page_info->page_address = page_address(page);
	*slot_addr = cpu_to_be64(addr);
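	/* Seed the page with a large reference bias up front so the hot path
	 * can hand fragments to the stack by decrementing pagecnt_bias
	 * instead of taking a fresh page reference for every packet.
	 */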
	/* The page already has 1 ref */
	page_ref_add(page, INT_MAX - 1);
	page_info->pagecnt_bias = INT_MAX;
}

static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
			       struct gve_rx_slot_page_info *page_info,
			       union gve_rx_data_slot *data_slot)
{
	struct page *page;
	dma_addr_t dma;
	int err;

	err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE);
	if (err)
		return err;

	gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
	return 0;
}

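/* Post one buffer page per ring slot. Returns the number of slots filled on
 * success, or a negative errno if a page could not be allocated or a QPL
 * could not be assigned.
 */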
static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
{
	struct gve_priv *priv = rx->gve;
	u32 slots;
	int err;
	int i;

	/* Allocate one page per Rx queue slot. Each page is split into two
	 * packet buffers, when possible we "page flip" between the two.
	 */
	slots = rx->mask + 1;

	rx->data.page_info = kvzalloc(slots *
				      sizeof(*rx->data.page_info), GFP_KERNEL);
	if (!rx->data.page_info)
		return -ENOMEM;

	if (!rx->data.raw_addressing) {
		rx->data.qpl = gve_assign_rx_qpl(priv);
		if (!rx->data.qpl) {
			kvfree(rx->data.page_info);
			rx->data.page_info = NULL;
			return -ENOMEM;
		}
	}
	for (i = 0; i < slots; i++) {
		if (!rx->data.raw_addressing) {
			struct page *page = rx->data.qpl->pages[i];
			dma_addr_t addr = i * PAGE_SIZE;

			gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
					    &rx->data.data_ring[i].qpl_offset);
			continue;
		}
		err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i],
					  &rx->data.data_ring[i]);
		if (err)
			goto alloc_err;
	}

	return slots;
alloc_err:
	while (i--)
		gve_rx_free_buffer(&priv->pdev->dev,
				   &rx->data.page_info[i],
				   &rx->data.data_ring[i]);
	return err;
}

static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
{
	ctx->curr_frag_cnt = 0;
	ctx->total_expected_size = 0;
	ctx->expected_frag_cnt = 0;
	ctx->skb_head = NULL;
	ctx->skb_tail = NULL;
	ctx->reuse_frags = false;
}

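/* Set up a single GQI receive ring: the DMA data slot ring, the prefilled
 * page buffers, the queue resources block, and the descriptor ring.
 */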
static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
{
	struct gve_rx_ring *rx = &priv->rx[idx];
	struct device *hdev = &priv->pdev->dev;
	u32 slots, npages;
	int filled_pages;
	size_t bytes;
	int err;

	netif_dbg(priv, drv, priv->dev, "allocating rx ring\n");
	/* Make sure everything is zeroed to start with */
	memset(rx, 0, sizeof(*rx));

	rx->gve = priv;
	rx->q_num = idx;

	slots = priv->rx_data_slot_cnt;
	rx->mask = slots - 1;
	rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;

	/* alloc rx data ring */
	bytes = sizeof(*rx->data.data_ring) * slots;
	rx->data.data_ring = dma_alloc_coherent(hdev, bytes,
						&rx->data.data_bus,
						GFP_KERNEL);
	if (!rx->data.data_ring)
		return -ENOMEM;
	filled_pages = gve_prefill_rx_pages(rx);
	if (filled_pages < 0) {
		err = -ENOMEM;
		goto abort_with_slots;
	}
	rx->fill_cnt = filled_pages;
	/* Ensure data ring slots (packet buffers) are visible. */
	dma_wmb();

	/* Alloc gve_queue_resources */
	rx->q_resources =
		dma_alloc_coherent(hdev,
				   sizeof(*rx->q_resources),
				   &rx->q_resources_bus,
				   GFP_KERNEL);
	if (!rx->q_resources) {
		err = -ENOMEM;
		goto abort_filled;
	}
	netif_dbg(priv, drv, priv->dev, "rx[%d]->data.data_bus=%lx\n", idx,
		  (unsigned long)rx->data.data_bus);

	/* alloc rx desc ring */
	bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
	npages = bytes / PAGE_SIZE;
	if (npages * PAGE_SIZE != bytes) {
		err = -EIO;
		goto abort_with_q_resources;
	}

	rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
						GFP_KERNEL);
	if (!rx->desc.desc_ring) {
		err = -ENOMEM;
		goto abort_with_q_resources;
	}
	rx->cnt = 0;
	rx->db_threshold = priv->rx_desc_cnt / 2;
	rx->desc.seqno = 1;

	/* Allocating half-page buffers allows page-flipping which is faster
	 * than copying or allocating new pages.
	 */
	rx->packet_buffer_size = PAGE_SIZE / 2;
	gve_rx_ctx_clear(&rx->ctx);
	gve_rx_add_to_block(priv, idx);

	return 0;

abort_with_q_resources:
	dma_free_coherent(hdev, sizeof(*rx->q_resources),
			  rx->q_resources, rx->q_resources_bus);
	rx->q_resources = NULL;
abort_filled:
	gve_rx_unfill_pages(priv, rx);
abort_with_slots:
	bytes = sizeof(*rx->data.data_ring) * slots;
	dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus);
	rx->data.data_ring = NULL;

	return err;
}

int gve_rx_alloc_rings(struct gve_priv *priv)
{
	int err = 0;
	int i;

	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		err = gve_rx_alloc_ring(priv, i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc rx ring=%d: err=%d\n",
				  i, err);
			break;
		}
	}
	/* Free the rings allocated so far if there was an error */
	if (err) {
		int j;

		for (j = 0; j < i; j++)
			gve_rx_free_ring(priv, j);
	}
	return err;
}

void gve_rx_free_rings_gqi(struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->rx_cfg.num_queues; i++)
		gve_rx_free_ring(priv, i);
}

void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	u32 db_idx = be32_to_cpu(rx->q_resources->db_index);

	iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]);
}

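/* Map the device's RX flag bits onto the kernel's packet hash types. */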
static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
{
	if (likely(pkt_flags & (GVE_RXF_TCP | GVE_RXF_UDP)))
		return PKT_HASH_TYPE_L4;
	if (pkt_flags & (GVE_RXF_IPV4 | GVE_RXF_IPV6))
		return PKT_HASH_TYPE_L3;
	return PKT_HASH_TYPE_L2;
}

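/* Only the first fragment of a packet carries the GVE_RX_PAD header padding. */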
static u16 gve_rx_ctx_padding(struct gve_rx_ctx *ctx)
{
	return (ctx->curr_frag_cnt == 0) ? GVE_RX_PAD : 0;
}

static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
					struct gve_rx_slot_page_info *page_info,
					u16 packet_buffer_size, u16 len,
					struct gve_rx_ctx *ctx)
{
	u32 offset = page_info->page_offset + gve_rx_ctx_padding(ctx);
	struct sk_buff *skb;

	if (!ctx->skb_head)
		ctx->skb_head = napi_get_frags(napi);

	if (unlikely(!ctx->skb_head))
		return NULL;

	skb = ctx->skb_head;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
			offset, len, packet_buffer_size);

	return skb;
}

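/* Each page is split into two half-page buffers; toggling the PAGE_SIZE / 2
 * bit in both the host offset and the device-visible address switches
 * between them.
 */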
static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
{
	const __be64 offset = cpu_to_be64(PAGE_SIZE / 2);

	/* "flip" to other packet buffer on this page */
	page_info->page_offset ^= PAGE_SIZE / 2;
	*(slot_addr) ^= offset;
}

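/* Returns 1 if the driver holds all references to the page and may reuse it,
 * 0 if the stack still holds a reference, and -1 on a refcount underflow
 * (which should never happen).
 */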
static int gve_rx_can_recycle_buffer(struct gve_rx_slot_page_info *page_info)
{
	int pagecount = page_count(page_info->page);

	/* This page is not being used by any SKBs - reuse */
	if (pagecount == page_info->pagecnt_bias)
		return 1;
	/* This page is still being used by an SKB - we can't reuse */
	else if (pagecount > page_info->pagecnt_bias)
		return 0;
	WARN(pagecount < page_info->pagecnt_bias,
	     "Pagecount should never be less than the bias.");
	return -1;
}

static struct sk_buff *
gve_rx_raw_addressing(struct device *dev, struct net_device *netdev,
		      struct gve_rx_slot_page_info *page_info, u16 len,
		      struct napi_struct *napi,
		      union gve_rx_data_slot *data_slot,
		      u16 packet_buffer_size, struct gve_rx_ctx *ctx)
{
	struct sk_buff *skb = gve_rx_add_frags(napi, page_info, packet_buffer_size, len, ctx);

	if (!skb)
		return NULL;

	/* Optimistically stop the kernel from freeing the page.
	 * We will check again in refill to determine if we need to alloc a
	 * new page.
	 */
	gve_dec_pagecnt_bias(page_info);

	return skb;
}

static struct sk_buff *
gve_rx_qpl(struct device *dev, struct net_device *netdev,
	   struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info,
	   u16 len, struct napi_struct *napi,
	   union gve_rx_data_slot *data_slot)
{
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct sk_buff *skb;

	/* if raw_addressing mode is not enabled gvnic can only receive into
	 * registered segments. If the buffer can't be recycled, our only
	 * choice is to copy the data out of it so that we can return it to the
	 * device.
	 */
	if (ctx->reuse_frags) {
		skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
		/* No point in recycling if we didn't get the skb */
		if (skb) {
			/* Make sure that the page isn't freed. */
			gve_dec_pagecnt_bias(page_info);
			gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
		}
	} else {
		const u16 padding = gve_rx_ctx_padding(ctx);

		skb = gve_rx_copy(netdev, napi, page_info, len, padding, ctx);
		if (skb) {
			u64_stats_update_begin(&rx->statss);
			rx->rx_frag_copy_cnt++;
			u64_stats_update_end(&rx->statss);
		}
	}
	return skb;
}

#define GVE_PKTCONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
static u16 gve_rx_get_fragment_size(struct gve_rx_ctx *ctx, struct gve_rx_desc *desc)
{
	return be16_to_cpu(desc->len) - gve_rx_ctx_padding(ctx);
}

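/* Walk every descriptor belonging to the next packet to validate sequence
 * numbers and fragment sizes, and decide up front whether the packet's
 * buffers can be flipped and reused. Returns false if the packet must be
 * dropped.
 */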
static bool gve_rx_ctx_init(struct gve_rx_ctx *ctx, struct gve_rx_ring *rx)
{
	bool qpl_mode = !rx->data.raw_addressing, packet_size_error = false;
	bool buffer_error = false, desc_error = false, seqno_error = false;
	struct gve_rx_slot_page_info *page_info;
	struct gve_priv *priv = rx->gve;
	u32 idx = rx->cnt & rx->mask;
	bool reuse_frags, can_flip;
	struct gve_rx_desc *desc;
	u16 packet_size = 0;
	u16 n_frags = 0;
	int recycle;

	/* In QPL mode, we only flip buffers when all buffers containing the packet
	 * can be flipped. RDA can_flip decisions will be made later, per frag.
	 */
	can_flip = qpl_mode;
	reuse_frags = can_flip;
	do {
		u16 frag_size;

		n_frags++;
		desc = &rx->desc.desc_ring[idx];
		desc_error = unlikely(desc->flags_seq & GVE_RXF_ERR) || desc_error;
		if (GVE_SEQNO(desc->flags_seq) != rx->desc.seqno) {
			seqno_error = true;
			netdev_warn(priv->dev,
				    "RX seqno error: want=%d, got=%d, dropping packet and scheduling reset.",
				    rx->desc.seqno, GVE_SEQNO(desc->flags_seq));
		}
		frag_size = be16_to_cpu(desc->len);
		packet_size += frag_size;
		if (frag_size > rx->packet_buffer_size) {
			packet_size_error = true;
			netdev_warn(priv->dev,
				    "RX fragment error: packet_buffer_size=%d, frag_size=%d, dropping packet.",
				    rx->packet_buffer_size, be16_to_cpu(desc->len));
		}
		page_info = &rx->data.page_info[idx];
		if (can_flip) {
			recycle = gve_rx_can_recycle_buffer(page_info);
			reuse_frags = reuse_frags && recycle > 0;
			buffer_error = buffer_error || unlikely(recycle < 0);
		}
		idx = (idx + 1) & rx->mask;
		rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
	} while (GVE_PKTCONT_BIT_IS_SET(desc->flags_seq));

	prefetch(rx->desc.desc_ring + idx);

	ctx->curr_frag_cnt = 0;
	ctx->total_expected_size = packet_size - GVE_RX_PAD;
	ctx->expected_frag_cnt = n_frags;
	ctx->skb_head = NULL;
	ctx->reuse_frags = reuse_frags;

	if (ctx->expected_frag_cnt > 1) {
		u64_stats_update_begin(&rx->statss);
		rx->rx_cont_packet_cnt++;
		u64_stats_update_end(&rx->statss);
	}
	if (ctx->total_expected_size > priv->rx_copybreak && !ctx->reuse_frags && qpl_mode) {
		u64_stats_update_begin(&rx->statss);
		rx->rx_copied_pkt++;
		u64_stats_update_end(&rx->statss);
	}

	if (unlikely(buffer_error || seqno_error || packet_size_error)) {
		gve_schedule_reset(priv);
		return false;
	}

	if (unlikely(desc_error)) {
		u64_stats_update_begin(&rx->statss);
		rx->rx_desc_err_dropped_pkt++;
		u64_stats_update_end(&rx->statss);
		return false;
	}
	return true;
}

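/* Build an skb for one fragment: copy small single-fragment packets
 * (rx_copybreak), otherwise attach the buffer as a frag via the raw
 * addressing or QPL path.
 */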
static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
				  struct gve_rx_slot_page_info *page_info, struct napi_struct *napi,
				  u16 len, union gve_rx_data_slot *data_slot)
{
	struct net_device *netdev = priv->dev;
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct sk_buff *skb = NULL;

	if (len <= priv->rx_copybreak && ctx->expected_frag_cnt == 1) {
		/* Just copy small packets */
		skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD, ctx);
		if (skb) {
			u64_stats_update_begin(&rx->statss);
			rx->rx_copied_pkt++;
			rx->rx_frag_copy_cnt++;
			rx->rx_copybreak_pkt++;
			u64_stats_update_end(&rx->statss);
		}
	} else {
		if (rx->data.raw_addressing) {
			int recycle = gve_rx_can_recycle_buffer(page_info);

			if (unlikely(recycle < 0)) {
				gve_schedule_reset(priv);
				return NULL;
			}
			page_info->can_flip = recycle;
			if (page_info->can_flip) {
				u64_stats_update_begin(&rx->statss);
				rx->rx_frag_flip_cnt++;
				u64_stats_update_end(&rx->statss);
			}
			skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
						    page_info, len, napi,
						    data_slot,
						    rx->packet_buffer_size, ctx);
		} else {
			if (ctx->reuse_frags) {
				u64_stats_update_begin(&rx->statss);
				rx->rx_frag_flip_cnt++;
				u64_stats_update_end(&rx->statss);
			}
			skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
					 page_info, len, napi, data_slot);
		}
	}
	return skb;
}

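/* Process one packet (possibly spanning several descriptors and buffers) and
 * hand it to GRO. Returns false if the packet was dropped; *work_done is set
 * to the number of descriptors consumed.
 */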
static bool gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
		   u64 *packet_size_bytes, u32 *work_done)
{
	struct gve_rx_slot_page_info *page_info;
	struct gve_rx_ctx *ctx = &rx->ctx;
	union gve_rx_data_slot *data_slot;
	struct gve_priv *priv = rx->gve;
	struct gve_rx_desc *first_desc;
	struct sk_buff *skb = NULL;
	struct gve_rx_desc *desc;
	struct napi_struct *napi;
	dma_addr_t page_bus;
	u32 work_cnt = 0;
	void *va;
	u32 idx;
	u16 len;

	idx = rx->cnt & rx->mask;
	first_desc = &rx->desc.desc_ring[idx];
	desc = first_desc;
	napi = &priv->ntfy_blocks[rx->ntfy_id].napi;

	if (unlikely(!gve_rx_ctx_init(ctx, rx)))
		goto skb_alloc_fail;

	while (ctx->curr_frag_cnt < ctx->expected_frag_cnt) {
		/* Prefetch two packet buffers ahead, we will need it soon. */
		page_info = &rx->data.page_info[(idx + 2) & rx->mask];
		va = page_info->page_address + page_info->page_offset;

		prefetch(page_info->page); /* Kernel page struct. */
		prefetch(va);              /* Packet header. */
		prefetch(va + 64);         /* Next cacheline too. */

		len = gve_rx_get_fragment_size(ctx, desc);

		page_info = &rx->data.page_info[idx];
		data_slot = &rx->data.data_ring[idx];
		page_bus = rx->data.raw_addressing ?
			be64_to_cpu(data_slot->addr) - page_info->page_offset :
			rx->data.qpl->page_buses[idx];
		dma_sync_single_for_cpu(&priv->pdev->dev, page_bus, PAGE_SIZE, DMA_FROM_DEVICE);

		skb = gve_rx_skb(priv, rx, page_info, napi, len, data_slot);
		if (!skb) {
			u64_stats_update_begin(&rx->statss);
			rx->rx_skb_alloc_fail++;
			u64_stats_update_end(&rx->statss);
			goto skb_alloc_fail;
		}

		ctx->curr_frag_cnt++;
		rx->cnt++;
		idx = rx->cnt & rx->mask;
		work_cnt++;
		desc = &rx->desc.desc_ring[idx];
	}

	if (likely(feat & NETIF_F_RXCSUM)) {
		/* NIC passes up the partial sum */
		if (first_desc->csum)
			skb->ip_summed = CHECKSUM_COMPLETE;
		else
			skb->ip_summed = CHECKSUM_NONE;
		skb->csum = csum_unfold(first_desc->csum);
	}

	/* parse flags & pass relevant info up */
	if (likely(feat & NETIF_F_RXHASH) &&
	    gve_needs_rss(first_desc->flags_seq))
		skb_set_hash(skb, be32_to_cpu(first_desc->rss_hash),
			     gve_rss_type(first_desc->flags_seq));

	*packet_size_bytes = skb->len + (skb->protocol ? ETH_HLEN : 0);
	*work_done = work_cnt;
	if (skb_is_nonlinear(skb))
		napi_gro_frags(napi);
	else
		napi_gro_receive(napi, skb);

	gve_rx_ctx_clear(ctx);
	return true;

skb_alloc_fail:
	if (napi->skb)
		napi_free_frags(napi);
	*packet_size_bytes = 0;
	*work_done = ctx->expected_frag_cnt;
	while (ctx->curr_frag_cnt < ctx->expected_frag_cnt) {
		rx->cnt++;
		ctx->curr_frag_cnt++;
	}
	gve_rx_ctx_clear(ctx);
	return false;
}

bool gve_rx_work_pending(struct gve_rx_ring *rx)
{
	struct gve_rx_desc *desc;
	__be16 flags_seq;
	u32 next_idx;

	next_idx = rx->cnt & rx->mask;
	desc = rx->desc.desc_ring + next_idx;

	flags_seq = desc->flags_seq;

	return (GVE_SEQNO(flags_seq) == rx->desc.seqno);
}

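/* Repost buffers to the ring: flip to the free half-page when possible,
 * otherwise recycle the page or replace it with a fresh allocation. Returns
 * false if a buffer is in an unexpected state and refilling had to stop.
 */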
static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	int refill_target = rx->mask + 1;
	u32 fill_cnt = rx->fill_cnt;

	while (fill_cnt - rx->cnt < refill_target) {
		struct gve_rx_slot_page_info *page_info;
		u32 idx = fill_cnt & rx->mask;

		page_info = &rx->data.page_info[idx];
		if (page_info->can_flip) {
			/* The other half of the page is free because it was
			 * free when we processed the descriptor. Flip to it.
			 */
			union gve_rx_data_slot *data_slot =
						&rx->data.data_ring[idx];

			gve_rx_flip_buff(page_info, &data_slot->addr);
			page_info->can_flip = 0;
		} else {
			/* It is possible that the networking stack has already
			 * finished processing all outstanding packets in the buffer
			 * and it can be reused.
			 * Flipping is unnecessary here - if the networking stack still
			 * owns half the page it is impossible to tell which half. Either
			 * the whole page is free or it needs to be replaced.
			 */
			int recycle = gve_rx_can_recycle_buffer(page_info);

			if (recycle < 0) {
				if (!rx->data.raw_addressing)
					gve_schedule_reset(priv);
				return false;
			}
			if (!recycle) {
				/* We can't reuse the buffer - alloc a new one */
				union gve_rx_data_slot *data_slot =
						&rx->data.data_ring[idx];
				struct device *dev = &priv->pdev->dev;
				gve_rx_free_buffer(dev, page_info, data_slot);
				page_info->page = NULL;
				if (gve_rx_alloc_buffer(priv, dev, page_info,
							data_slot)) {
					u64_stats_update_begin(&rx->statss);
					rx->rx_buf_alloc_fail++;
					u64_stats_update_end(&rx->statss);
					break;
				}
			}
		}
		fill_cnt++;
	}
	rx->fill_cnt = fill_cnt;
	return true;
}

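/* NAPI RX work: process completed descriptors up to @budget, update stats,
 * restock the ring, and ring the doorbell. Returns the number of packets
 * processed, or @budget if the queue should be polled again to finish
 * refilling.
 */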
static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
			     netdev_features_t feat)
{
	u32 work_done = 0, total_packet_cnt = 0, ok_packet_cnt = 0;
	struct gve_priv *priv = rx->gve;
	u32 idx = rx->cnt & rx->mask;
	struct gve_rx_desc *desc;
	u64 bytes = 0;

	desc = &rx->desc.desc_ring[idx];
	while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) &&
	       work_done < budget) {
		u64 packet_size_bytes = 0;
		u32 work_cnt = 0;
		bool dropped;

		netif_info(priv, rx_status, priv->dev,
			   "[%d] idx=%d desc=%p desc->flags_seq=0x%x\n",
			   rx->q_num, idx, desc, desc->flags_seq);
		netif_info(priv, rx_status, priv->dev,
			   "[%d] seqno=%d rx->desc.seqno=%d\n",
			   rx->q_num, GVE_SEQNO(desc->flags_seq),
			   rx->desc.seqno);

		dropped = !gve_rx(rx, feat, &packet_size_bytes, &work_cnt);
		if (!dropped) {
			bytes += packet_size_bytes;
			ok_packet_cnt++;
		}
		total_packet_cnt++;
		idx = rx->cnt & rx->mask;
		desc = &rx->desc.desc_ring[idx];
		work_done += work_cnt;
	}

	if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold)
		return 0;

	if (work_done) {
		u64_stats_update_begin(&rx->statss);
		rx->rpackets += ok_packet_cnt;
		rx->rbytes += bytes;
		u64_stats_update_end(&rx->statss);
	}

	/* restock ring slots */
	if (!rx->data.raw_addressing) {
		/* In QPL mode buffs are refilled as the desc are processed */
		rx->fill_cnt += work_done;
	} else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
		/* In raw addressing mode buffs are only refilled if the avail
		 * falls below a threshold.
		 */
		if (!gve_rx_refill_buffers(priv, rx))
			return 0;

		/* If we were not able to completely refill buffers, we'll want
		 * to schedule this queue for work again to refill buffers.
		 */
		if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
			gve_rx_write_doorbell(priv, rx);
			return budget;
		}
	}

	gve_rx_write_doorbell(priv, rx);
	return total_packet_cnt;
}

int gve_rx_poll(struct gve_notify_block *block, int budget)
{
	struct gve_rx_ring *rx = block->rx;
	netdev_features_t feat;
	int work_done = 0;

	feat = block->napi.dev->features;

	/* If budget is 0, do all the work */
	if (budget == 0)
		budget = INT_MAX;

	if (budget > 0)
		work_done = gve_clean_rx_done(rx, budget, feat);